| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64-64) |
|---|---|---|---|---|---|---|---|
""" This module helps customize the mayavi install. It tries to import
any `site_mayavi.py` (anywhere on `sys.path`) or `user_mayavi.py`. The
`user_mayavi.py` script is found in the user's `~/.mayavi2` directory
and this directory is also injected into the path.
It is the user's responsibility to import the mayavi registry
(mayavi.registry:registry) and register any new modules or
filters into mayavi using suitable metadata.
If the user desires to contribute any plugins then they may expose a
function called `get_plugins()` which returns a list of plugins that they
wish to add to the default mayavi envisage app. The user may expose one
set of global plugins in the `site_mayavi` module and another in the
`user_mayavi` module without any problems.
The function `get_custom_plugins` returns a list of all the available
custom plugins.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2008-2015, Prabhu Ramachandran, Enthought, Inc.
# License: BSD Style.
from __future__ import print_function
# Standard library imports.
import sys
import traceback
from os.path import join, exists
# Enthought library imports.
from traits.util.home_directory import get_home_directory
from mayavi.preferences.api import preference_manager
# The functions that return the plugins.
_get_global_plugins = lambda: []
_get_user_plugins = lambda: []
# First try the global mayavi customizations.
try:
# This will import site_mayavi, so any plugin registrations done
# there will be run.
from site_mayavi import get_plugins as _get_global_plugins
except ImportError:
pass
# Now do any local user level customizations.
#
# The following code obtains any customizations that are imported
# from a `user_mayavi.py` provided by the user in their `~/.mayavi2`
# directory.
#
# Note that `~/.mayavi2` is placed in `sys.path` so make sure that you
# choose your module names carefully (so as not to override any common
# module names).
home = get_home_directory()
m2dir = join(home, '.mayavi2')
user_module = join(m2dir, 'user_mayavi.py')
if exists(user_module):
# Add ~/.mayavi2 to sys.path.
sys.path.append(m2dir)
# Doing an import gives user information on any errors.
import user_mayavi
try:
# Now try and import the user defined plugin extension.
from user_mayavi import get_plugins as _get_user_plugins
except ImportError:
# user_mayavi may not be adding any new plugins.
pass
# Now handle any contributions that the user has chosen via the
# preferences.
def _import_contrib(pkg):
mod = None
try:
components = pkg.split('.')
if len(components) > 1:
mod_name = '.'.join(components[:-1])
sym_name = components[-1]
mod = __import__(mod_name, globals(), locals(), [sym_name], level=0)
mod = getattr(mod, sym_name)
else:
mod_name = components[0]
mod = __import__(mod_name, globals(), locals(), [mod_name], level=0)
except Exception:
print("*"*80)
traceback.print_exc(file=sys.stdout)
print("*"*80)
return mod
def add_contributions():
"""Import any contributions that the user has selected via
preferences."""
for pkg in preference_manager.root.contrib_packages:
_import_contrib(pkg + '.user_mayavi')
def get_contrib_plugins():
"""Get plugins requested by different contributions."""
plugins = []
for pkg in preference_manager.root.contrib_packages:
mod = _import_contrib(pkg + '.user_mayavi')
if mod is not None and hasattr(mod, 'get_plugins'):
plugins.extend(mod.get_plugins())
return plugins
# Import the contributions.
add_contributions()
def get_custom_plugins():
"""Convenience function that returns all customization plugins as a
list.
"""
return _get_global_plugins() + _get_user_plugins() + \
get_contrib_plugins()
| dmsurti/mayavi | mayavi/core/customize.py | Python | bsd-3-clause | 3,952 | ["Mayavi"] | d71579ea9a173cd18e8183d0706cd6de6adffe615a1170988a7fdf3a93d26ee7 |
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import chigger
cyl0 = chigger.geometric.LineSource(point1=[0,0,0], point2=[0,1,0], data=[1, 2, 4, 8, 16], cmap='viridis')
cyls = chigger.base.ChiggerResult(cyl0)
window = chigger.RenderWindow(cyls, size=[300,300], test=True)
window.write('line_source_data.png')
window.start()
| nuclear-wizard/moose | python/chigger/tests/geometric/line_source/line_source_data.py | Python | lgpl-2.1 | 645 | ["MOOSE"] | 7d10250b5891896b7263d82c6b88e87215e308f31ae186b19bb3e570252c9531 |
#%% addViscous.py
"""
Viscous correction code to augment Cart3D solutions.
Alexander Ward
April 2017
"""
#http://www.dtic.mil/dtic/tr/fulltext/u2/a045367.pdf
from math import exp, log
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.optimize import brentq
import time
#%% Inputs
"""
Here are the majority of the user defined parameters. They are grouped into:
Simulation - Parameters on the simulation itself
Freestream - Freestream gas properties
Transition - Parameters controlling how you want to treat transition
Vehicle - Reference area and length, wall temperature
Post Processing - Properties to save/report, slices to take
"""
""" -- SIMULATION -- """
#------------
# Cart3D solution filename {str}
cart3DFilename = 'Components.i.triq'
#------------
# Streamline filename {str}
streamlineFilename = '4.csv'
#------------
# Number of Tecplot generated streamlines {int}
nStreamlines = 68
#------------
# Maximum number of steps in the Tecplot streamlines {int}
maxSteps = 19999
#------------
# If you don't have streamlines available, set to 0 to guess the development
# length based on flow direction (positive x) NOTE: If the vehicle starts in
# negative x, you must set an offset to translate the nose to x = 0.
streamlineFlag = 0 # {bool}
LEoffset = 0. # {float}
#------------
# If you want lots of info to print out, set this to true, {bool}
verbose = 0
#------------
# If you just want to debug, tell the code to only iterate over part of the
# .triq file. Make sure to set the flag as well.
sampleAmount = 5000 # {int}
sampleFlag = 1 # {bool}
""" -- FREESTREAM -- """
#------------
# Freestream temperature, [K], {float}
T_inf = 62.157
#------------
# Freestream pressure, [Pa], {float}
p_inf = 3163.4
#------------
# Freestream density, [kg/m^3], {float}
rho_inf = 0.177268215
#------------
# Freestream Mach number, {float}
M_inf = 4.5099
#------------
# Ratio of specific heats (dimensionless, constant), {float}
g_inf = 1.4
#------------
# Angle of attack
alpha = 0. * np.pi/180
#------------
# Sideslip angle
beta = 0. * np.pi/180
#------------
""" -- TRANSITION -- """
#------------
# Set the point of transition (Re or streamline based length) OR set to False
# to use correlation.
criticalLocation = 0
#------------
# Set to 'laminar' or 'turbulent' if you want to assume and ignore transition
regime = 'turbulent'
#------------
# Roughness induced transition location (streamline coordinate)
roughnessCoordinate = 0
#------------
# Set true to have no transitional region (i.e. fully turbulent when tripped).
immediateTransitionFlag = 1
#------------
""" -- VEHICLE -- """
#------------
# Wall temperature of the vehicle surface [K], {float}
T_wall = 316.2
#------------
# Reference area for aerodynamic force coefficients
S_ref = 3.
#------------
# Reference length for aerodynamic moment coefficients (measured from nose)
L_ref = 1.5
#------------
""" -- POST PROCESSING -- """
#------------
# If you DON'T want to save a .csv file of local properties, set flag to 0
writeDataFlag = 1 # {bool}
#------------
# Additional properties to save to a .csv file (called flowData.csv) {List}
# You will always get ['x', 'y', 'z', 'cf', 'ch', 'Velocity', 'T_aw']
# Choose from ['M', 'rho', 'p', 'cp', 'T_e', 'U', 'V', 'W', 'BL Regime',
# 'Dev Length', 'Re']
additionalProperties = ['Dev Length', 'Re', 'BL Regime'] # {list of str}
#------------
# Write data to this filename
outputFilename = 'flowData' # {str}
#------------
# Save data to VTK format for nice viewing in ParaView
vtkFlag = 0
#------------
# Save data to csv
csvFlag = 0
#------------
###############################################################################
""" You probably (hopefully) won't have to change anything below this line. """
###############################################################################
#%% The code
class Fluid(object):
""" Contains all the constant fluid properties
"""
def __init__(self):
# Sutherland law reference conditions and constants
self.mu_0 = 1.716e-5
self.T_0 = 273
self.sutherland_S = 111
# Gas constants
self.R_universal = 8.314510
self.R_air = 287.058
self.molarMass_air = 28.964923942499997
# Freestream conditions
self.T_inf = T_inf
self.p_inf = p_inf
self.g_inf = g_inf
self.a_inf = (self.g_inf * self.R_air * self.T_inf) **0.5
# Constant specific heat value {float}
self.constantCp = 1.015
def calculate_cp(self, T):
"""
        This function calculates the specific heat capacity and ratio of specific heats
        at a temperature T [K]. It uses the polynomial curve fit taken from NASA's CEA code
McBride, B. Zehe, M. Gordon, S. (2002)
"NASA Glenn Coefficients for Calculating Thermodynamic Properties of Individual Species"
molar mass air = 28.964923942499997
Specific heat capacities at 298.15 K:
Cp{N2, O2, Ar, CO2} = 29.124, 29.378, 20.786, 37.135
"""
verbose = 0
if T < 200.:
T = 200.
if verbose:
print "Problem, Temp < 200 K, I'll set it to 200 K"
elif T > 6000.:
T = 6000.
            if verbose:
                print "Problem, Temp > 6000 K, I'll set it to 6000 K"
if T < 1000.:
N2 = [2.210371497E+04, -3.818461820E+02, 6.082738360E+00, -8.530914410E-03, 1.384646189E-05, -9.625793620E-09, 2.519705809E-12]
O2 = [-3.425563420E+04, 4.847000970E+02, 1.119010961E+00, 4.293889240E-03, -6.836300520E-07,-2.023372700E-09, 1.039040018E-12]
Ar = [0., 0., 2.5, 0., 0., 0., 0.]
CO2 = [4.943650540E+04, -6.264116010E+02, 5.301725240E+00, 2.503813816E-03, -2.127308728E-07, -7.689988780E-10, 2.849677801E-13]
else:
N2 = [5.877124060E+05, -2.239249073E+03, 6.066949220E+00, -6.139685500E-04, 1.491806679E-07, -1.923105485E-11, 1.061954386E-15]
O2 = [-1.037939022E+06, 2.344830282E+03, 1.819732036E+00, 1.267847582E-03,-2.188067988E-07, 2.053719572E-11, -8.193467050E-16]
Ar = [2.010538475E+01, -5.992661070E-02, 2.500069401E+00, -3.992141160E-08, 1.205272140E-11, -1.819015576E-15, 1.078576636E-19]
CO2 = [1.176962419E+05, -1.788791477E+03, 8.291523190E+00, -9.223156780E-05, 4.863676880E-09, -1.891053312E-12, 6.330036590E-16]
coefficients = 8.314510 * np.array([N2, O2, Ar, CO2])
temperatureVector = np.array([T**-2, T**-1, 1., T, T**2, T**3, T**4])
cp_species = np.dot(coefficients, temperatureVector) / np.array([28.01340, 31.99880, 39.94800, 44.00950])
cp_air = np.sum(cp_species * np.array([.78084, .20947, .009365, .000319]))
gamma = cp_air*self.molarMass_air/(cp_air*self.molarMass_air - self.R_universal)
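        # Note: cp_air comes out in kJ/(kg.K) because the molar masses above
        # are in g/mol (roughly 1.005 for air at 300 K); gamma is computed
        # here for reference but only cp_air is returned.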
return cp_air
#%%
class Streamline(Fluid):
""" This class takes care of the streamlines. It imports them, calculates
the running length and then sorts them to efficiently calculate the local
running length in each cell.
"""
def __init__(self):
""" """
def importer(self, filename, FieldFlag=0):
""" This function imports the streamline data by iterating over the
Tecplot generated file. Since we are iterating over it anyway, we
calculate the length of the streamline for every point as well, saving
it in self.streamlineLengths.
No flow properties along the streamline are saved - only the length,
and x, y, z coordinates. The flow properties for each area element are
        imported from the actual solution (.triq file).
"""
if not streamlineFlag:
print "No streamlines - we'll use a coordinate based running length"
return (None, None, None)
print 'Importing streamline data...'
self.filename = filename
with open(self.filename, 'r') as dataFile:
data = dataFile.read().split('Streamtrace')
# Iterate through and construct the streamline dataframes
Streamline.streamlines = 1000.*np.ones((maxSteps, nStreamlines, 3))
Streamline.streamlineLengths = 1000.*np.ones([maxSteps, nStreamlines])
count = 0 # if FieldFlag else 1
length = 0 # Initialise maxLength counter to be small
streamlineStepLengths = []
streamlineTotalLengths = []
for zone in data:
# This is iterating over the file, selecting the streamlines
#print 'Zone', count
zone = zone.strip().split('\n')
streamlineStepLength = len(zone)
streamlineStepLengths.append(int(streamlineStepLength))
# if count == 0:
# # Delete the surface label
# del zone[1]; del zone[0]
# if FieldFlag == False:
# # We don't want to read and store the velocity field data
# count += 1; continue
L = 0.
rowCount = 0
coords = []
streamlineLength = []
for row in zone:
# This is iterating over the individual streamlines
row = row.split(',')
if rowCount == 0:
x, y, z = (float(row[0]), float(row[1]), float(row[2]))
else:
                    xNew, yNew, zNew = (float(row[0]), float(row[1]), float(row[2]))
                    L += ((x-xNew)**2 + (y-yNew)**2 + (z-zNew)**2)**0.5
                    # Check for recirculation before updating the previous point
                    if xNew < x:
                        print "Warning: a streamline may be recirculating"
                        names = ['Streamline', 'x', 'y', 'z', 'Length']
                        properties = [count, xNew, yNew, zNew, L]
                        for c1, c2 in zip(names, properties):
                            print "%-10s %s" % (c1, c2)
                    x, y, z = xNew, yNew, zNew
coords.append((x, y, z))
streamlineLength.append(L)
rowCount += 1
# if count == 0 and FieldFlag:
# # We're constructing the surface
# self.field = zone
# else:
# # We're constructing a streamline
# #print np.shape(coords)
# streamlines[:len(zone), 3*(count):3*count+3] = np.array(coords)
# streamlineLengths[:len(zone), count] = np.array(streamlineLength)
Streamline.streamlines[:streamlineStepLength, count:count+1, :] = np.array(coords).reshape(streamlineStepLength, 1, 3)
Streamline.streamlineLengths[:streamlineStepLength, count:count+1] = np.array(streamlineLength).reshape(streamlineStepLength, 1)
streamlineTotalLengths.append(streamlineLength[-1])
count += 1
Streamline.maxStreamlineSteps = max(streamlineStepLengths)
Streamline.streamlineLengths = Streamline.streamlineLengths[:Streamline.maxStreamlineSteps,:,]
sortedLengthIndices = np.argsort(streamlineStepLengths)[::-1]
# Sort the streamlines into order of increasing length
Streamline.streamlines = Streamline.streamlines[:, sortedLengthIndices, :]
sortedStepLengths = np.array(streamlineStepLengths)[sortedLengthIndices]
Streamline.maxStepLength = sortedStepLengths[0]
# Tolerance on positioning the streamlines
tol = 0.001
# The first (longest and hopefully ~first in x)
Streamline.firstStagnationPoint = Streamline.streamlines[:, 0, 0]
for n in xrange(nStreamlines-1):
# Iterate through the streamlines and adjust the starting position.
# Skip the first (longest) streamline - we're adjusting all the others
# relative to it.
n += 1
# Get length of current streamline
length = sortedStepLengths[n]
# Get current streamline
streamline = Streamline.streamlines[:length, n:n+1, :]
# Get the running length vector
streamlineLength = Streamline.streamlineLengths[:length, n:n+1]
# Get the starting x position of the current streamline
xStartCurrent = streamline[0, 0, 0]
try:
# Try to find a position based on a small tolerance
newRow = max((np.where(abs(Streamline.firstStagnationPoint - xStartCurrent) < tol)[0][0], 0))
except:
# If not increase the tolerance a lot to ensure we find a spot.
if verbose:
print "Streamline adjustment failed (tol =", tol, "), increasing tolerance by 10."
newRow = max((np.where(abs(Streamline.firstStagnationPoint - xStartCurrent) < tol*10)[0][0], 0))
# move the streamline to new starting location
if newRow + length >= Streamline.maxStepLength:
# We are moving the streamline further back
# Physically this means this streamline finishes behind the longest
# Perhaps a swept wing where the trailing edge is behind the fuselage
print "Warning: Attempted to push streamline outside the streamlines array!"
Streamline.streamlines[newRow:newRow+length, n:n+1, :] = streamline
Streamline.streamlineLengths[newRow:newRow+length, n:n+1] = streamlineLength
else:
Streamline.streamlines[newRow:newRow+length, n:n+1, :] = streamline
Streamline.streamlineLengths[newRow:newRow+length, n:n+1] = streamlineLength
# Overwrite old area
Streamline.streamlines[:newRow, n:n+1, :] = 1000.*np.ones((newRow, 1, 3))
# Adjust streamlines to the actual maximum streamline step length not
# the maximum possible tecplot streamline steps
Streamline.streamlines = Streamline.streamlines[:Streamline.maxStepLength, :, :]
# Finished importing and calculating lengths
print 'Streamline import and length calculation complete.', count-1, 'streamlines imported with max', self.maxStreamlineSteps, 'steps.', '\n'
if verbose:
            print 'Maximum calculated streamline length is', max(streamlineTotalLengths), 'units.'
        # Check that the longest streamline isn't weirdly longer than the next
        perCentLonger = 100.*(sortedStepLengths[0] - sortedStepLengths[1])/float(sortedStepLengths[1])
        if perCentLonger >= 10.:
            print "Warning: The longest streamline is", perCentLonger, "% longer than the next longest"
"""
Include some checks here. Is max length >> length of vehicle?
Any streamline in the opposite direction to the flow (recirculation)
Any streamline >> longer than all the others/neighbours?
"""
return Streamline.streamlines, streamlineTotalLengths, Streamline.maxStepLength
#%%
class Data(Streamline):
def __init__(self, filename):
"""
"""
Streamline.__init__(self)
self.filename = filename
self.flowData = pd.DataFrame()
# Set the defaults for transition
self.naturalTransitionFlag = 0
self.ReynoldsTransitionFlag = 0
self.coordinateTransitionFlag = 0
self.roughnessInducedFlag = 0
self.laminarOnlyFlag = 0
self.turbulentOnlyFlag = 0
self.immediateTransitionFlag = 0
self.coneCorrectionFlag = 1
# Work out what we want to do with transition.
if criticalLocation == 0:
self.naturalTransitionFlag = 1
elif criticalLocation > 1000.:
self.ReynoldsTransitionFlag = 1
if verbose:
print "Using a Reynolds number based transition criterion."
elif criticalLocation < 1000.:
self.coordinateTransitionFlag = 1
if verbose:
print "Using a Streamline coordinate length based transition criterion."
elif roughnessCoordinate != 0:
self.roughnessInducedFlag = 1
if verbose:
print "Using a roughness location based transition criterion."
if regime == 'laminar':
self.laminarOnlyFlag = 1
if verbose:
print "Laminar only simulation."
elif regime == 'turbulent':
self.turbulentOnlyFlag = 1
if verbose:
print "Turbulent only simulation."
if immediateTransitionFlag:
self.immediateTransitionFlag = 1
if verbose:
print "Immediate transition - no transitional flow region."
else:
print "Sorry, not sure what you want to do about transition."
def triqImporter(self, FieldFlag=1):
""" Imports the area data from the Tecplot field data.
"""
lineNumber = 0
print 'Importing flow data...'
with open(self.filename, 'r') as dataFile:
            for line in dataFile:
                if lineNumber == 0:
                    # We're on the first line - the .triq header
                    #print line
                    self.nVertices, self.nTriangles, nScalars = (int(x) for x in line.split())
                    lineNumber += 1
                    # Nothing else is needed from the manual scan
                    break
# Read in the vertex information
self.vertices = pd.read_csv(self.filename, delim_whitespace=1,
names=['x', 'y', 'z'],
skiprows=1, nrows=self.nVertices, memory_map=1)
# Read in the triangle information
self.triangles = pd.read_csv(self.filename, delim_whitespace=1,
names=['v1', 'v2', 'v3'],
skiprows=self.nVertices+1, nrows=self.nTriangles,
memory_map=1)
if sampleFlag:
self.triangles = self.triangles.sample(sampleAmount).transpose()
else:
self.triangles = self.triangles.transpose()
# Read in the flow information
temp = pd.read_csv(self.filename, delim_whitespace=1,
names=["rho","U","V","W","P"],
skiprows=self.nVertices+2*self.nTriangles+1, nrows=2*self.nVertices,
memory_map=1)
self.flow = temp.iloc[1::2, :].reset_index()
if sampleFlag:
self.nTriangles = sampleAmount
print "Field import complete", self.nTriangles, 'elements,', self.nVertices, 'vertices.', '\n'
return self.vertices, self.triangles, self.flow
def getProperties(self):
"""
Calculate all the flow properties of the triangles and add it all to the
flowData dataframe. This function just applies the master function to
each row of the triangles DataFrame (list of vertices).
It calculates the centroid and averages the data to the centroid.
"""
print 'Running main code now...'
# print self.nTriangles, 'elements,', self.nVertices, 'vertices.', '\n'
self.count = 1
self.startTime = time.time()
self.percentComplete = 0
# Set up counters to keep track of problematic cells.
self.badMachCount = 0
self.badCfCount = 0
self.badTempCount = 0
self.badVelCount = 0
# Watch for very cold wall - Switch to Spalding & Chi
self.spaldingFlag = 0
# Watch for high Mach numbers - Switch to Coles
self.colesFlag = 0
# Run the main calculation
self.flowData = pd.DataFrame(self.triangles.apply(self.master, axis=0))
timeElapsed = time.time() - self.startTime
m, s = divmod(timeElapsed, 60); h, m = divmod(m, 60)
print 'Viscous correction code complete.', '\n'
print 'Time elapsed', "%d:%02d:%02d" % (h, m, s)
print 'Average time per loop', timeElapsed/self.count
if self.spaldingFlag:
print "Warning: T_aw/T_w < 0.2 was encountered - Spalding & Chi method was employed but be careful of results."
if self.colesFlag:
print "Warning: M > 10 was encountered, the van Driest is known to be inaccurate - Coles' method (1964) might be better here."
print "Bad cell counts (Total %d):" % self.nTriangles
names = ['Mach', 'cf', 'T', 'Vel']
if sampleFlag:
amounts = [(self.badMachCount/float(sampleAmount) * 100., self.badMachCount),
(self.badCfCount/float(sampleAmount) * 100., self.badCfCount),
(self.badTempCount/float(sampleAmount) * 100., self.badTempCount),
(self.badVelCount/float(sampleAmount) * 100., self.badVelCount)]
else:
amounts = [(self.badMachCount/float(self.nTriangles) * 100., self.badMachCount),
(self.badCfCount/float(self.nTriangles) * 100., self.badCfCount),
(self.badTempCount/float(self.nTriangles) * 100., self.badTempCount),
(self.badVelCount/float(self.nTriangles) * 100., self.badVelCount)]
for c1, c2 in zip(names, amounts):
print "%-10s %s" % (c1, c2)
print '\n'
return self.flowData.transpose()
def master(self, row):
""" This function iterates over the list of triangle vertices. To only
iterate over a potentially very long list, all calculations are done
at once - looping only once. Unfortuantely to avoid the overhead
associated with calling functions in python, most calculations are
done inside master() - this makes it long and difficult to read.
The properties calculated include:
A - The area of the triangle calculated with the cross product of
the vectors.
n - The normal of the triangle, again from the cross product. By
convention normal is point OUTWARDS, into the flow.
Cx, Cy, Cz - The coordinates of the centroid of each triangle.
        Re - The local Reynolds number calculated from an interpolated
guess at the local running length based on the two closest
streamlines (either side of the point).
Cf - The local skin friction coefficient. Check associated docs.
Ch - The local heat transfer coefficient (Stanton number). Check
associated docs.
The following properties are taken from the Cart3D solution file and
(currently) linearly interpolated to the centroid. Note Cart3D
normalises its data against the freestream value and gamma (=1.4).
rho - density [kg/m^3]
U - x velocity [m/s]
V - y velocity [m/s]
W - z velocity [m/s]
p - pressure [Pa]
Currently takes a simple average of the properties - should implement
a weighted average based on distance from centroid to vertex when areas
get bigger. Depending on computational cost, set up a tolerance.
"""
"""
if row some multiple of total number of triangles:
print out a status update and estimate of time
"""
#print "count", self.count
if verbose:
reportingInterval = 1
else:
reportingInterval = 5
timeElapsed = time.time() - self.startTime
        if self.count % max(1, reportingInterval * self.nTriangles / 100) == 0 or self.count == 1000:
m, s = divmod(timeElapsed, 60); h, m = divmod(m, 60)
print self.percentComplete, '% of elements completed so far. Wall clock time', "%d:%02d:%02d" % (h, m, s)
printFlag = 0
if self.percentComplete > 0:
timeRemaining = timeElapsed *(100 - self.percentComplete)/self.percentComplete
mRemaining, sRemaining = divmod(timeRemaining, 60)
hRemaining, mRemaining = divmod(mRemaining, 60)
print "Approximately", "%d:%02d:%02d" % (hRemaining, mRemaining, sRemaining), "remaining."
printFlag = 1
if self.count == 1000 and not printFlag:
timeRemaining = timeElapsed/1000. * self.nTriangles
mRemaining, sRemaining = divmod(timeRemaining, 60)
hRemaining, mRemaining = divmod(mRemaining, 60)
print "Rough initial estimate:", "%d:%02d:%02d" % (hRemaining, mRemaining, sRemaining), "remaining."
self.percentComplete += reportingInterval; #self.count += 1
# These are the vertices of the specific triangle - they correspond to
# indices in the vertices AND flow DataFrames
# Note STL is indexed from 1, hence we need to minus one to get the
# dataframe index.
v1i, v2i, v3i = row[0] - 1, row[1] - 1, row[2] - 1
if v1i > self.nVertices or v2i > self.nVertices or v3i > self.nVertices:
print 'Vertex indexing has died - max > than number of vertices.'
# These are the (x, y, z) indices of each vertex
v1 = np.array(self.vertices.iloc[v1i])
v2 = np.array(self.vertices.iloc[v2i])
v3 = np.array(self.vertices.iloc[v3i])
# Form two vectors forming the triangle
v1v2 = v2 - v1
v1v3 = v3 - v1
        # Calculate area and normal from the cross product of the above vectors.
        crossProduct = np.cross(v1v2, v1v3)
        area = 0.5 * np.linalg.norm(crossProduct)
        # Normalise to a unit outward normal (dividing by the area would
        # leave a vector of magnitude 2).
        normal = tuple(crossProduct / np.linalg.norm(crossProduct))
        # Calculate the centroid coordinates.
centroidx = np.mean([v1[0], v2[0], v3[0]])
centroidy = np.mean([v1[1], v2[1], v3[1]])
centroidz = np.mean([v1[2], v2[2], v3[2]])
centroid = (centroidx, centroidy, centroidz)
# Calculate the mean surface flow properties at the centroid of each triangle.
        # Column order after reset_index: index, rho, U, V, W, P
properties = np.mean([self.flow.iloc[v1i], self.flow.iloc[v2i], self.flow.iloc[v3i]], axis=0)
self.rho, U, V, W, self.p = properties[1], properties[2], properties[3], properties[4], properties[5]
# Undo the normalisation Cart3D uses for some currently unknown reason
self.rho *= rho_inf; U *= Fluid.a_inf; V *= Fluid.a_inf; W *= Fluid.a_inf; self.p *= rho_inf*Fluid.a_inf**2.
# print 'rho', self.rho, 'U', U, 'V', V, 'W', W, 'p', self.p
        # Need to catch the problematic data Cart3D sometimes produces -
        # generally degeneracies in small cut cells. Known issue.
if self.p < 1e-1:
self.p = 1e-1
if verbose:
print "Warning: Pressure < 1e-1 at", v1, v2, v3, "setting to 1e-1 Pa."
if self.rho < 1e-6:
self.rho = 1e-6
if verbose:
print "Warning: Density < 1e-6 at", v1, v2, v3, "setting to 1e-6 kg/m^3."
# Calculate local velocity vector
self.velocityMagnitude = (U**2. + V**2. + W**2.)**0.5
velocityDirection = np.array([U, V, W], dtype='float64') / self.velocityMagnitude
#print 'velocity', velocityMagnitude
if self.velocityMagnitude > 1.5*M_inf*Fluid.a_inf:
self.badVelCount += 1
if verbose:
print "Warning: velocity > 1.5x freestrem at", v1, v2, v3
# Calculate the temperature based on ideal gas law
self.T = self.p / (self.rho * Fluid.R_air)
#print 'T', self.T
if self.T > 800.:
#print "Warning: High edge temperature, constant Cp assumption might be in trouble - consider variable Cp."
self.badTempCount += 1
# Calculate local Mach number
try:
self.M = self.velocityMagnitude/((g_inf*Fluid.R_air*self.T)**0.5)
if self.M > 1.5 * M_inf:
# print "Warning high Mach number,", self.M, "Temperature is", self.T
self.badMachCount += 1
# print "x coordinate is", centroid[0], "Are you in the wake?"
self.M = M_inf
if self.M > 10.:
self.colesFlag = 1
except:
print 'Check local sound speed at', v1, v2, v3
        # Calculate local dynamic viscosity using Keyes' law if T < 95 Kelvin
if self.T < 95.:
self.mu = (1.488 * 10**-6.) * self.T**0.5 / (1. + 122.1*(10.**(-5/self.T))/self.T)
self.mu_wall = (1.488 * 10**-6.) * T_wall**0.5 / (1. + 122.1*(10.**(-5/T_wall))/T_wall)
else:
self.mu = Fluid.mu_0 * (self.T/Fluid.T_0)**(3./2) * ((Fluid.T_0 + Fluid.sutherland_S) / (self.T + Fluid.sutherland_S))
self.mu_wall = Fluid.mu_0 * (T_wall/Fluid.T_0)**(3./2) * ((Fluid.T_0 + Fluid.sutherland_S) / (T_wall + Fluid.sutherland_S))
#print 'mu/mu_wall', self.mu/self.mu_wall
# Calculate the local streamline based running length
self.calculate_runningLength(centroid)
# Calculate the local Reynolds number
self.localReynolds = self.rho * self.velocityMagnitude * self.localRunningLength / self.mu
#print 'localReynolds', self.localReynolds
# We always assume a laminar boundary layer
self.laminar = 1; self.transitional = 0; self.turbulent = 0
if self.laminar:
self.calculate_laminarCf()
if self.transitional:
self.calculate_transitionalCf()
if self.turbulent:
self.calculate_turbulentCf()
        # The above computed skin friction coefficients should be corrected for
        # thickness with form factors. Unique factors are implemented here
        # for wings and bodies
wallShearMagnitude = self.cf * 0.5 * self.rho * self.velocityMagnitude**2.
wallShearVector = wallShearMagnitude * velocityDirection
viscousForce = wallShearVector*area
        # Calculate the Reynolds analogy factor
Pr_T = 0.86; Pr_L = 0.71
bracket = 1./(5.*0.4) * (1. - Pr_T) * ((np.pi**2.)/6. + 1.5*(1. - Pr_T)) + (Pr_L/Pr_T - 1.) + log(1. + (5./6.)*(Pr_L/Pr_T - 1.))
s = Pr_T * (1. + 5.*(self.cf/2.)**0.5 * bracket)
        # Calculate Stanton number from the modified Reynolds analogy
ch = (1./s) * self.cf/2.
# Calculate heat transfer coefficient
h = ch*self.rho*self.velocityMagnitude*Fluid.calculate_cp(self.T)
# Calculate heat transfer into the wall
bracket = self.T * (1. + self.r * (g_inf - 1.)/2. * self.M**2.) - T_wall
q_wall = ch * self.rho*self.velocityMagnitude * Fluid.calculate_cp(self.T) * bracket
if verbose:
print 'Local properties...'
names = ['area', 'centroidx', 'centroidy', 'centroidz', 'normal', 'rho', 'U', 'V', 'W', 'p', 'cf', 'Ff']
properties = [area, centroidx, centroidy, centroidz, normal, self.rho, U, V, W, self.p, self.cf, viscousForce]
for c1, c2 in zip(names, properties):
print "%-10s %s" % (c1, c2)
print '\n'
# Increment the element counter
self.count += 1
return pd.Series({'A': area,
'x': centroid[0],
'y': centroid[1],
'z': centroid[2],
'n': normal,
'rho': self.rho,
'U': U,
'V': V,
'W': W,
'Velocity': self.velocityMagnitude,
'M': self.M,
'p': self.p,
'cf': self.cf,
'ch': ch,
#'cp': self.cp,
'BL Regime': self.BLregime,
'Dev Length': self.localRunningLength,
'Re': self.localReynolds,
'Ff': viscousForce,
'T_e': self.T,
'T_aw': self.T_adiabaticWall,
'q_wall': q_wall})
def calculate_runningLength(self, centroid, searchBracket=200):
""" This function calculates the local running length given
a location.
If there is streamline data available - it will use that, otherwise
it just uses a cartesian based running length (i.e. x coordinate).
"""
# Firstly check we actually want the streamline running length
if not streamlineFlag:
# MAKE SURE THIS ISN'T NEGATIVE
self.localRunningLength = centroid[0] + LEoffset
if self.localRunningLength <= 0.005:
# Need to include blunted leading edge effects here but for the moment
# we'll just set it to 0.005
self.localRunningLength = 0.005
return
# Populate a large array repeating the current location
# self.Location = np.tile(centroid, (self.maxStreamlineSteps, nStreamlines))
self.Location = np.broadcast_to(centroid, (searchBracket, nStreamlines, 3))
currentX = centroid[0]
# print 'Current centroid', centroid
# Tolerance on finding the position on the streamlines
tol = 0.001
try:
# Try to find a position based on a small tolerance
rowPosition = max((np.where(abs(Streamline.firstStagnationPoint - currentX) < tol)[0][0], 0))
except:
# If not increase the tolerance a lot to ensure we find a spot.
if verbose:
print "Row position adjustment failed (tol =", tol, "), increasing tolerance by 10."
rowPosition = max((np.where(abs(Streamline.firstStagnationPoint - currentX) < tol*10)[0][0], 0))
        # Select a window of searchBracket rows around rowPosition, clamped to
        # the bounds of the streamline array.
        windowStart = min(max(0, rowPosition - searchBracket//2),
                          max(0, Streamline.maxStepLength - searchBracket))
        self.streamlineSection = self.streamlines[windowStart:windowStart+searchBracket, :, :]
# print "Streamline section goes between", self.streamlineSection[0, 0, 0], self.streamlineSection[-1, 0, 0]
# delta x, delta y, delta z from location to every point
self.deltas = self.Location - self.streamlineSection
# Square the distances
self.deltas = np.square(self.deltas)
# Separate dx, dy and dz to sum together
dx = self.deltas[:, :, 0]
dy = self.deltas[:, :, 1]
dz = self.deltas[:, :, 2]
# Take the square root to find the Euclidean distance
self.distances = np.sqrt(dx + dy + dz)
"""
POTENTIAL SPEED IMPROVEMENT
# possibly need to have:
# temp = np.asfortranarray(self.distances)
# streamlineMinimumsIndices = temp.argmin(axis=0)
"""
"""
NEED TO INCLUDE GRAD CHECK HERE TO ENSURE STREAMLINES ARE ON THE CORRECT SIDE OF THE OBJECT
"""
# Indices of two closest streamlines (column indices)
# Column index of two closest streamline points
neighbouringStreamlineIndices = self.distances.min(axis=0).argsort(kind='mergesort')[:2] # index
# print 'neighbouringStreamlineIndices', neighbouringStreamlineIndices
# Indices of the step number to the minimum distances
# Row index of two closest streamline points
neighbouringStreamlineStepIndices = self.distances.argsort(axis=0, kind='mergesort')[0, neighbouringStreamlineIndices]
# print 'neighbouringStreamlineStepIndices', neighbouringStreamlineStepIndices
# # Indices of the two closest streamline points
# neighbouringStreamlines_indices = np.array([neighbouringStreamlineStepIndices, neighbouringStreamlineIndices])
# print 'neighbouringStreamlines_indices', neighbouringStreamlines_indices
# Distances to two closest streamlines
neighbouringStreamlines_distances = self.distances[neighbouringStreamlineStepIndices, neighbouringStreamlineIndices] # value
# print "neighbouringStreamline_distances", neighbouringStreamlines_distances
if np.max(abs(neighbouringStreamlines_distances)) > 1.:
print "WARNING: Closest streamline seems to be far away at", np.max(neighbouringStreamlines_distances), "m."
print 'Current centroid', centroid
        # Running length at the two neighbouring streamline points
        # Correct the row indices for the window offset used above
        neighbouringStreamlineStepIndices = neighbouringStreamlineStepIndices + windowStart
# neighbouringStreamlines_indices = np.array([neighbouringStreamlineStepIndices, neighbouringStreamlineIndices])
neighbouringStreamlines_lengths = Streamline.streamlineLengths[neighbouringStreamlineStepIndices, neighbouringStreamlineIndices]
# print 'neighbouringStreamlines_lengths', neighbouringStreamlines_lengths
# Linearly interpolate between two neighbouring streamlines
self.localRunningLength = float(neighbouringStreamlines_lengths[0] + neighbouringStreamlines_distances[0]*np.diff(neighbouringStreamlines_lengths)/np.sum(neighbouringStreamlines_distances))
# print 'localRunningLength', self.localRunningLength
if self.localRunningLength <= 0.005:
# Need to include blunted leading edge effects here but for the moment
# we'll just set it to 0.005
self.localRunningLength = 0.005
def calculate_laminarCf(self, checkFlag=1):
# Check to ensure flow isn't transitional
if checkFlag:
if not self.laminarOnlyFlag:
# Not doing a laminar only analysis
if self.turbulentOnlyFlag:
# Running turbulent only analysis
self.laminar = 0; self.transitional = 0; self.turbulent = 1
return
elif self.naturalTransitionFlag:
# Natural transition criterion
self.Re_critical = 10.**(6.421 * exp((1.209e-4) * self.M**2.641))
"""
NEED TO INCLUDE THE WING SWEEP STUFF HERE
Re_critical = Re_critical*(0.787 * cos(wingLEsweep)**4.346 - 0.7221*exp(-0.0991*wingLEsweep) + 0.9464)
"""
if self.localReynolds >= self.Re_critical:
# The flow is transitional, break out of the laminar analysis
self.laminar = 0; self.transitional = 1; self.turbulent = 0
return
elif self.roughnessInducedFlag:
# Roughness induced transition condition
pass
                elif self.ReynoldsTransitionFlag:
                    # Critical Reynolds criterion - transition once the local
                    # Reynolds number exceeds the critical value
                    if self.localReynolds >= criticalLocation:
                        self.laminar = 0; self.transitional = 1; self.turbulent = 0
                        return
                elif self.coordinateTransitionFlag:
                    # Transition once the running length passes the set coordinate
                    if self.localRunningLength >= criticalLocation:
                        self.laminar = 0; self.transitional = 1; self.turbulent = 0
                        return
# The above transition checks all showed that it was laminar flow,
# continue laminar analysis:
# Calculate the laminar skin friction coefficient
# Set recovery factor
self.r = 0.85 # van Driest says 0.85 to 0.89 for lam to turbs
# Calculate the adiabatic wall temperature
self.T_adiabaticWall = (1. + self.r*((g_inf - 1)/2.) * self.M**2.) * self.T
# T_awOnT = self.T_adiabaticWall/self.T
# Reference temperature
# T_reference = self.T*(0.45 * 0.55 * T_awOnT + 0.16*self.r*(g_inf - 1)/2. * self.M**2.)
T_reference = self.T*(1. + 0.032*self.M**2. + 0.58 * (T_wall/self.T - 1.))
# Reference density
rho_reference = self.p/(Fluid.R_air * T_reference)
# Reference viscosity
mu_reference = 1.458e-6 * ((T_reference)**1.5) / (T_reference + 110.4)
# Reference Reynolds
# Re_reference = self.M * (g_inf*Fluid.R_air*T_reference)**0.5 * rho_reference * self.localRunningLength / mu_reference
Re_reference = self.velocityMagnitude * rho_reference * self.localRunningLength / mu_reference
try:
cf = 0.664 / (Re_reference)**0.5
except:
print 'Calculation of laminar flow Cf failed'
cf = 0.
if self.coneCorrectionFlag:
# Flow is 3D, apply cone rule correction
cf *= 1.73
self.cf = cf
        # This is to show laminar (BLregime = 0) vs transitional
        # (0 < BLregime < 1) vs turbulent flow (BLregime = 1)
        self.BLregime = 0
return self.cf
def calculate_transitionalCf(self):
# Set recovery factor
self.r = 0.87 # van Driest says 0.85 to 0.89 for lam to turbs
        self.criticalRunningLength_start = (6.421*self.mu*exp(1.209e-4 * self.M**2.641)) / (self.rho * self.velocityMagnitude)
        # criticalRunningLength_end = self.criticalRunningLength_start * (1. + self.Re_critical**(-0.2))
# Check we aren't turbulent
if self.immediateTransitionFlag:
# Ignoring transitional region
self.laminar = 0; self.transitional = 0; self.turbulent = 1
return
# elif self.localRunningLength >= criticalRunningLength_end:
# Flow is now fully turbulent
# self.laminar = 0; self.transitional = 0; self.turbulent = 1
# return
else:
# The above checks all showed we are still in a transitional region
cf_laminar = self.calculate_laminarCf(checkFlag=0)
try:
cf_turbulent = self.calculate_turbulentCf(r=0.87)
except:
print "Calculation of transitional flow turbulent Cf failed"
names = ['Local Re', 'mu', 'mu_wall', 'T_aw', 'T_edge', 'P', 'rho']
properties = [self.localReynolds, self.mu, self.mu_wall, self.T_adiabaticWall, self.T, self.p, self.rho]
                for c1, c2 in zip(names, properties):
                    print "%-10s %s" % (c1, c2)
                print '\n'
                # Fall back so the laminar/turbulent blend below still evaluates
                cf_turbulent = 0.
# Set up the variables to vary between laminar and turbulent skin friction coefficients.
exponent = -3. *(exp(log(2)/(5.*self.criticalRunningLength_start) * self.Re_critical**(-0.2)*(self.localRunningLength - self.criticalRunningLength_start)) - 1.)**2.
epsilon = 1 - exp(exponent)
try:
cf = (1-epsilon)*cf_laminar + epsilon*cf_turbulent
            except:
                print "Calculation of transitional flow Cf failed"
                cf = cf_laminar
if self.coneCorrectionFlag:
# Flow is 3D, apply cone rule correction
cf *= (1-epsilon)*1.15 + epsilon*1.73
self.cf = cf
# This is to plot lam (BLregime = 0) vs transitional (0 < BLregime < 1)
# vs turb flow (BLregime = 1)
self.BLregime = 0.5
return self.cf
def calculate_turbulentCf(self, r=0.89):
#print "Turbulent flow"
        # Calculate the turbulent skin friction coefficient
# van Driest says r = 0.85 to 0.89 for lam to turbs
self.r = r
self.T_adiabaticWall = (1. + self.r*((g_inf - 1.)/2.) * self.M**2.) * self.T
# Quick wall temp check
if T_wall/self.T_adiabaticWall < 0.2:
self.spaldingFlag = 1
cf = self.calculate_turbulentCf_spaldingChi()
else:
# Set up the variables/coefficients for the Van Driest estimate
aSquared = self.r * (g_inf - 1.)/2. * self.M**2. * self.T/T_wall
b = self.T_adiabaticWall/T_wall - 1.
denominator = (b**2. + 4.*aSquared)**0.5
A = self.clean_A(aSquared, b, denominator)
B = self.clean_B(aSquared, b, denominator)
# Solve the implicit equation for skin friction
cf_func = lambda cf: 4.15*log(self.localReynolds*cf*self.mu/self.mu_wall, 10) + 1.7 - ((np.arcsin(A) + np.arcsin(B)) / ((cf*(self.T_adiabaticWall - self.T)/self.T)**0.5))
            try:
                cf = brentq(cf_func, 1e-15, 0.1)
except:
if verbose:
print "Calculation of turbulent Cf failed, Flow properties at culprit cell below."
print "Am I in the Wake? Running length is", self.localRunningLength, "Set cf to zero."
names = ['Local Re', 'length', 'mu', 'mu_wall', 'T_aw', 'T_edge', 'T_wall', 'p', 'rho', 'velocity', 'Mach']
properties = [float(self.localReynolds), float(self.localRunningLength), self.mu, self.mu_wall, self.T_adiabaticWall, self.T, T_wall, self.p, self.rho, self.velocityMagnitude, self.M]
for c1, c2 in zip(names, properties):
print "%-10s %s" % (c1, c2)
print '\n'
cf = 0.
            # Fall back to the Meador-Smart reference temperature correlation if van Driest fails
# Reference temperature
# T_reference = self.T*(1. + 0.032*self.M**2. + 0.58 * (T_wall/self.T - 1.))
#
# # Reference density
# rho_reference = self.p/(Fluid.R_air * T_reference)
#
# # Reference viscosity
# mu_reference = 1.458e-6 * ((T_reference)**1.5) / (T_reference + 110.4)
#
# # Reference Reynolds
# Re_reference = self.velocityMagnitude * rho_reference * self.localRunningLength / mu_reference
#
# cf = 0.02296/(Re_reference**0.139) * (rho_reference/self.rho)**0.861 * (mu_reference/self.mu)**0.139
self.badCfCount += 1
if self.coneCorrectionFlag:
# Flow is 3D, apply cone rule correction
cf *= 1.15
# End cf (van driest or Spalding & Chi) estimate
self.cf = cf
# This is to plot lam (BLregime = 0) vs transitional (0 < BLregime < 1)
# vs turb flow (BLregime = 1)
self.BLregime = 1
return self.cf
def clean_A(self, a, b, denominator):
"""
This function is required to avoid math domain errors in an arcsin
calculation in the Van Driest calculation.
"""
A = ( 2.*a - b ) / denominator
if A < -1.:
return -1.
elif A > 1.:
return 1.
else:
return A
def clean_B(self, a, b, denominator):
"""
This function is required to avoid math domain errors in an arcsin
calculation in the Van Driest calculation.
"""
B = ( b ) / denominator
if B < -1.:
return -1.
elif B > 1.:
return 1.
else:
return B
def calculate_turbulentCf_spaldingChi(self, r=0.89):
        # Calculate the turbulent skin friction coefficient using the Spalding & Chi method
        # This is more accurate than van Driest for T_wall/self.T_adiabaticWall < 0.2
        # van Driest says r = 0.85 to 0.89 for lam to turbs
        self.r = r
# Set up the variables/coefficients for the estimate
# Various wall temperature ratios
TawOnT = self.T_adiabaticWall/self.T
TwOnT = T_wall/self.T
denominator = ( (TawOnT + TwOnT)**2. - 4.*TwOnT )**0.5
alpha = (TawOnT + TwOnT - 2.) / denominator
beta = (TawOnT - TwOnT) / denominator
F_c = (TawOnT - 1.) / (np.arcsin(alpha) + np.arcsin(beta))**2.
# Solve the implicit equation for the incompressible skin friction
LHS = self.localReynolds / (F_c*(TawOnT**0.772 * TwOnT**-1.474))
K = 0.4
E = 12.
kappa = lambda cf: K * (2./cf)**0.5
# bracket = (2. + (2. - kappa)**2.)*exp(kappa) - 6. - 2.*kappa - (1./12)*kappa**4. - (1./20)*kappa**5. - (1./60)*kappa**6. - (1./256)*kappa**7.
bracket = lambda cf: (2. + (2. - kappa(cf))**2.)*exp(kappa(cf)) - 6. - 2.*kappa(cf) - (1./12)*kappa(cf)**4. - (1./20)*kappa(cf)**5. - (1./60)*kappa(cf)**6. - (1./256)*kappa(cf)**7.
cf_inc_func = lambda cf: (1./12)*(2./cf)**2. + (1./(E*K**3.)) * bracket(cf) - LHS
try:
cf_inc = brentq(cf_inc_func, 5e-6, 0.1)
cf = (1./F_c) * cf_inc
except:
# print "Calculation of turbulent Cf failed, Flow properties at culprit cell below."
# print "Am I in the Wake? Running length is", self.localRunningLength, "Set cf to zero."
# names = ['Local Re', 'length', 'mu', 'mu_wall', 'T_aw', 'T_edge', 'T_wall', 'p', 'rho', 'velocity', 'Mach']
# properties = [float(self.localReynolds), float(self.localRunningLength), self.mu, self.mu_wall, self.T_adiabaticWall, self.T, T_wall, self.p, self.rho, self.velocityMagnitude, self.M]
# for c1, c2 in zip(names, properties):
# print "%-10s %s" % (c1, c2)
# print '\n'
cf = 0.
self.badCfCount += 1
if self.coneCorrectionFlag:
# Flow is 3D, apply cone rule correction
cf *= 1.15
return cf
#%%
class postProcessor(Data):
def __init__(self, Field, flowData):
self.flowData = flowData
self.propertiesToSave = ['cf', 'ch', 'Velocity', 'T_aw'] + additionalProperties
def viscousForceCoefficients(self):
""" This function will calculate and return the viscous force
coefficients. The forces are calculated and stored here.
Sign convention
x - positive toward tail (flow in the direction of positive x)
y - positive upwards
z - positive left spanwise sitting in cockpit facing forwards
"""
        # Viscous forces in body axes
viscousForces_body = sum(self.flowData.loc['Ff'])
# Transform to wind axes
viscousForces = viscousForces_body
# Calculate velocity
        u_inf = M_inf*Fluid.a_inf
        # Per the sign convention above, x is the streamwise (drag) direction
        # and y the lift direction
        cd_viscous = viscousForces[0]/(0.5*S_ref*rho_inf*u_inf**2.)
        cl_viscous = viscousForces[1]/(0.5*S_ref*rho_inf*u_inf**2.)
return cl_viscous, cd_viscous
def viscousMomentCoefficients(self):
""" Similar to the above this function will calculate the viscous
pitching moment coefficients.
Sign convention
Directions same as above
Positive rotations defined by RH rule
"""
        cm_viscous = 5  # placeholder value - the moment calculation is not implemented yet
return cm_viscous
    def saveAs_CSV(self, outputFilename=outputFilename, properties=None):
        """ This function will write the flow data to file so we can view it
        in Paraview.
        """
        # Avoid a mutable default argument - appending to it would grow the
        # list across repeated calls.
        if properties is None:
            properties = ['x', 'y', 'z', 'cf', 'ch', 'Velocity', 'T_aw']
        outputFilename += '.csv'
        properties = properties + additionalProperties
# self.flowData = self.flowData.round(decimals=5)
self.flowData.to_csv(outputFilename, sep=',', columns=properties, index=0, index_label=0, float_format='%.3f')
print "output file saved as", outputFilename
def saveSlice_CSV(self, outputFilename=outputFilename, xSlice=[], ySlice=[], zSlice=[]):
""" Take a slice and save it to csv """
outputFilename += '_slice.csv'
        # # This defines how 'narrow' a slice we want - ParaView can also do this directly.
# tol = 1e-2
#
# # Pre allocate empty DF here?
# slicedData = pd.DataFrame()
#
# if not xSlice:
# # We have some slices along x to make
# for point in xSlice:
# # we want to slice at all of these points
# > xSlice[point] - tol
# self.flowData.transpose().loc[(self.flowData.transpose()["x"] > 0.599 & self.flowData.transpose()["x"] < 0.601 & self.flowData.transpose()["z"] == 0), "cf"]
# elif not ySlice:
# # Slices along y to take
# elif not zSlice:
# # And slices aong z
flowData = self.flowData.apply(pd.to_numeric, errors='ignore')
slicedData_indices = (flowData["z"] > -0.01) & (flowData["z"] < 0.01)
slicedData = flowData.loc[slicedData_indices]
slicedData.to_csv(outputFilename, sep=',', index=0, index_label=0)
print "Slices saved in", outputFilename
def saveAs_VTK(self, outputFilename):
"""
Write the flow data as a VTK unstructured grid - STILL NOT SURE WHY???
"""
outputFilename += '.vtu'
vtuFile = open(outputFilename, "w")
NumberOfPoints = Field.nVertices
NumberOfTriangles = Field.nTriangles
# Write the header
vtuFile.write("<VTKFile type=\"UnstructuredGrid\" byte_order=\"BigEndian\">\n")
vtuFile.write("<UnstructuredGrid>")
vtuFile.write("<Piece NumberOfPoints=\"%d\" NumberOfCells=\"%d\">\n" %
(NumberOfPoints, NumberOfTriangles))
# Write the point coordinates
vtuFile.write("<Points>\n")
vtuFile.write(" <DataArray type=\"Float32\" NumberOfComponents=\"3\"")
vtuFile.write(" format=\"ascii\">\n")
        for index in range(NumberOfPoints):
x, y, z = Field.vertices.iloc[index]
vtuFile.write(" %e %e %e\n" % (x, y, z))
vtuFile.write(" </DataArray>\n")
vtuFile.write("</Points>\n")
vtuFile.write("<Cells>\n")
# Write the connectivity
vtuFile.write(" <DataArray type=\"Int32\" Name=\"connectivity\"")
vtuFile.write(" format=\"ascii\">\n")
temp = Field.triangles.transpose()
for index in range(NumberOfTriangles):
v1, v2, v3 = temp.iloc[index]
vtuFile.write(" %d %d %d\n" % (v1, v2, v3))
vtuFile.write(" </DataArray>\n")
        # Write the offsets. Since all of the point-lists are concatenated,
        # these offsets into the connectivity array specify the end of each
        # cell - for triangles that is 3*(cell index + 1).
        vtuFile.write(" <DataArray type=\"Int32\" Name=\"offsets\"")
        vtuFile.write(" format=\"ascii\">\n")
        for point in range(NumberOfTriangles):
            vtuFile.write(" %d\n" % (3*(point + 1)))
        vtuFile.write(" </DataArray>\n")
# Write the types
vtuFile.write(" <DataArray type=\"UInt8\" Name=\"types\"")
vtuFile.write(" format=\"ascii\">\n")
VTKtype = 5 # VTK_TRIANGLE
for point in range(NumberOfTriangles):
vtuFile.write(" %d\n" % VTKtype)
vtuFile.write(" </DataArray>\n")
vtuFile.write("</Cells>\n")
# Write the flow variables
vtuFile.write("<CellData>\n")
# Write variables from the dictionary.
for variable in self.propertiesToSave:
vtuFile.write(" <DataArray Name=\"%s\" type=\"Float32\" NumberOfComponents=\"1\"" % (variable))
vtuFile.write(" format=\"ascii\">\n")
for index in range(NumberOfTriangles):
vtuFile.write(" %e\n" % Field.flowData.transpose()[variable].iloc[index])
vtuFile.write(" </DataArray>\n")
# Write the velocity vector - have to do this separately because it's a vector
vtuFile.write(" <DataArray Name=\"Velocity vector\" type=\"Float32\" NumberOfComponents=\"3\"")
vtuFile.write(" format=\"ascii\">\n")
        for index in range(NumberOfTriangles):
U, V, W = (Field.flowData.transpose()['U'].iloc[index],
Field.flowData.transpose()['V'].iloc[index],
Field.flowData.transpose()['W'].iloc[index])
vtuFile.write(" %e %e %e\n" % (U, V, W))
vtuFile.write(" </DataArray>\n")
# Write footers and close file
vtuFile.write("</CellData>\n")
vtuFile.write("</Piece>\n")
vtuFile.write("</UnstructuredGrid>\n")
vtuFile.write("</VTKFile>\n")
vtuFile.close()
return
#%% ----- Run the program
if __name__ == '__main__':
print time.strftime("%H:%M:%S"), 'Starting....'
# Run Tecplot in batch mode to generate and save the streamline data
# try:
# call(['tec360', '-b', 'Components.i.plt', 'retrieveStreamlines.mcr'])
# except:
# print 'Import of Tecplot streamline data failed'
# Initialise Fluid class - sets up basic fluid and freestream properties
    # Rebind the class name to a singleton instance so `Fluid.<attr>` lookups
    # in the classes above resolve against this instance
    Fluid = Fluid()
# Initialise streamline class
Streamlines = Streamline()
streamlineCoordinates, streamlineLengths, maxSteplength = Streamlines.importer(streamlineFilename)
StreamlinesDict = Streamline.__dict__
# Initialise a data class, this contains all the Field (Cart3D) data.
Field = Data(cart3DFilename)
# Import Cart3D data
vertices, triangles, flow = Field.triqImporter()
# Run the actual code - calculate viscous forces
flowData = Field.getProperties()
flowData = flowData.round(decimals=5)
post = postProcessor(Field, flowData)
    if csvFlag:
        post.saveAs_CSV()
    if vtkFlag:
        post.saveAs_VTK(outputFilename)
post.saveSlice_CSV()
DataDict = Field.__dict__
#Field.plotter()
| AlexanderWard1/VC3D | Integral Methods/addViscous_Bowcutt.py | Python | mit | 61,090 | ["ParaView", "VTK"] | 0aa3c0bb5c397017d135aff32de5bb0dd1f38ce36d1b21f8150bf29127dffedd |
#! python3
# Check FLAC files for tag consistency. Used when ripping CDs to make sure
# the resulting files are cleaned up and ready for moving to the master tree
# of audio files.
#
# Can perform the following checks:
#
# * Check that all required tags are present in all FLAC tracks.
# * Check that tags which should be the same across tracks of a single album
# are in fact the same.
# * Check that tags which should be different across tracks of a single album
# are in fact different.
# * Check that all tags present are already known to this script.
# * For tags which have been replaced with newer ones (e.g. prefer Label
#   to Organization), check that either old or new versions, but not both,
#   are present. Optionally warn if the old version is used.
# * Miscellaneous checks for reasonableness - for instance, make sure that the
# tracks for an album have consecutive track numbers.
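#
# Example invocations (the script name here is illustrative; the flags and
# default path are defined in parse_args below):
#   python check_flac_tags.py -v D:\CDRip
#   python check_flac_tags.py --tag composer,period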
import argparse
import collections
from collections import defaultdict
import os
import re
import sys
from CommonUtils import *
from CommonUtils import uprint as print
def enum(*args):
enums = dict(zip(args, range(len(args))))
return type('Enum', (), enums)
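# e.g. enum('A', 'B', 'C') returns a class with A == 0, B == 1 and C == 2.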
TagKind = enum('Required', 'ReqClassical', 'Optional', 'Mapped')
TagQual = enum('AllSame', 'DiscSame', 'AllDiff', 'Ignored')
known_tags = {
# Tag name Kind Qualifier Multivalued
'accurateripdiscid': (TagKind.Required, TagQual.AllDiff, False),
'accurateripresult': (TagKind.Required, TagQual.Ignored, False),
'album': (TagKind.Required, TagQual.AllSame, False),
'album artist sort': (TagKind.Required, TagQual.AllSame, False),
'albumartist': (TagKind.Required, TagQual.AllSame, False),
'albumartistterse': (TagKind.ReqClassical, TagQual.AllSame, False),
'artist sort': (TagKind.Optional, TagQual.Ignored, True),
'artist': (TagKind.Required, TagQual.Ignored, True),
'artistterse': (TagKind.ReqClassical, TagQual.Ignored, False),
'catalog #': (TagKind.Optional, TagQual.Ignored, False),
'cddb disc id': (TagKind.Required, TagQual.DiscSame, False),
'cdgap': (TagKind.Required, TagQual.AllDiff, False),
'cdindex': (TagKind.Required, TagQual.AllDiff, False),
'cdtoc': (TagKind.Required, TagQual.DiscSame, False),
'comment': (TagKind.Optional, TagQual.Ignored, False),
'compilation': (TagKind.Optional, TagQual.AllSame, False),
'composer': (TagKind.Required, TagQual.Ignored, True),
'composersort': (TagKind.ReqClassical, TagQual.Ignored, True),
'composerterse': (TagKind.ReqClassical, TagQual.Ignored, False),
'conductor': (TagKind.Optional, TagQual.Ignored, False),
'conductorsort': (TagKind.Optional, TagQual.Ignored, False),
'crc': (TagKind.Required, TagQual.Ignored, False),
'date': (TagKind.Required, TagQual.AllSame, False),
'description': (TagKind.Optional, TagQual.Ignored, False),
'discnumber': (TagKind.Required, TagQual.DiscSame, False),
'disctotal': (TagKind.Required, TagQual.AllSame, False),
'encoded by': (TagKind.Required, TagQual.AllSame, False),
'encoder': (TagKind.Required, TagQual.AllSame, False),
'encoder settings': (TagKind.Required, TagQual.AllSame, False),
'genre': (TagKind.Required, TagQual.DiscSame, False),
'hdcd': (TagKind.Optional, TagQual.Ignored, False),
'instrument': (TagKind.Optional, TagQual.Ignored, False),
'isrc': (TagKind.Optional, TagQual.Ignored, False),
'label': (TagKind.Required, TagQual.AllSame, False),
'length': (TagKind.Required, TagQual.Ignored, False),
'mbid': (TagKind.Optional, TagQual.Ignored, False),
'nocompilationtest': (TagKind.Optional, TagQual.DiscSame, False),
'nomultipleartisttest': (TagKind.Optional, TagQual.DiscSame, False),
'orchestra': (TagKind.Optional, TagQual.Ignored, False),
'organization': (TagKind.Mapped, TagQual.AllSame, False),
'performer': (TagKind.Optional, TagQual.Ignored, False),
'period': (TagKind.ReqClassical, TagQual.Ignored, False),
'pre-emphasis': (TagKind.Optional, TagQual.Ignored, False),
'profile': (TagKind.Required, TagQual.AllSame, False),
'rating': (TagKind.Optional, TagQual.Ignored, False),
'replaygain_album_gain': (TagKind.Required, TagQual.DiscSame, False),
'replaygain_album_peak': (TagKind.Required, TagQual.DiscSame, False),
'replaygain_track_gain': (TagKind.Required, TagQual.Ignored, False),
'replaygain_track_peak': (TagKind.Required, TagQual.Ignored, False),
'soloists': (TagKind.Optional, TagQual.Ignored, True),
'soloistssort': (TagKind.Optional, TagQual.Ignored, True),
'source': (TagKind.Required, TagQual.AllSame, False),
'style': (TagKind.Optional, TagQual.AllSame, True),
'title': (TagKind.Required, TagQual.Ignored, False),
'totaldiscs': (TagKind.Mapped, TagQual.AllSame, False),
'totaltracks': (TagKind.Mapped, TagQual.DiscSame, False),
'tracknumber': (TagKind.Required, TagQual.Ignored, False),
'tracktotal': (TagKind.Required, TagQual.DiscSame, False),
'upc': (TagKind.Required, TagQual.DiscSame, False),
}
known_tags_set = set(known_tags)
required_tags = {k for k, v in known_tags.items() if v[0] == TagKind.Required}
required_classical_tags = required_tags | {k for k, v in known_tags.items()
if v[0] == TagKind.ReqClassical}
optional_tags = {k for k, v in known_tags.items() if v[0] == TagKind.Optional}
identical_tags_across_discs = {k for k, v in known_tags.items() if v[1] == TagQual.AllSame}
identical_tags_within_disc = {k for k, v in known_tags.items()
if v[1] in (TagQual.AllSame, TagQual.DiscSame)}
different_tags = {k for k, v in known_tags.items() if v[1] == TagQual.AllDiff}
allowed_multivalued_tags = {k for k, v in known_tags.items() if v[2]}
mapped_tags = {
'organization': 'label',
'totaldiscs': 'disctotal',
'totaltracks': 'tracktotal',
}
sorted_tags = {
'artist': 'artist sort',
'albumartist': 'album artist sort',
'composer': 'composersort',
'conductor': 'conductorsort',
'soloists': 'soloistssort',
}
test_leading_The_tags = ['artist', 'albumartist', 'composer']
default_path = 'D:\\CDRip'
args = None
msgs = None
album_count = 0
disc_count = 0
track_count = 0
warn_count = 0
def parse_args():
global args
parser = argparse.ArgumentParser(description='Check FLAC files for tag consistency.')
parser.add_argument('path', nargs='*', default=[default_path],
help='root of the tree to search for albums of FLAC files (default: %s)' % default_path)
parser.add_argument('-v', '--verbose', action='store_true',
help='Show every album processed, not just ones with issues')
parser.add_argument('-m', '--missing', action='store_false',
help="Don't warn about missing required tags")
parser.add_argument('-M', '--mapping', action='store_false',
help="Don't warn about mapping obsolete tags to newer ones")
parser.add_argument('-o', '--other', action='store_false',
help="Don't warn about missing non-FLAC files")
parser.add_argument('-p', '--pause', action='store_true',
help='Pause before exiting')
parser.add_argument('-s', '--sort-tag-mismatch', action='store_false',
help="Don't warn about mismatches between a tag and the "
"sort tag variant (e.g. Artist vs. Artist Sort)")
parser.add_argument('-S', '--no-sort-tag', action='store_true',
help="Warn if a sort tag (e.g. Artist Sort) is missing on all "
"tracks of a disc, not just some")
parser.add_argument('-t', '--tag', action='append',
help='Find all tracks with the given tag')
args = parser.parse_args()
if args.tag:
def flatten_args(el):
# collections.Iterable was removed in Python 3.10; collections.abc is the
# portable home (assumes `import collections.abc` is in effect at the top
# of this file).
if isinstance(el, collections.abc.Iterable) and not isinstance(el, str):
return [a for b in el for a in flatten_args(b)]
else:
return [el]
tags = flatten_args([_.split(',') for _ in args.tag])
args.tag = {_.strip().lower() for _ in tags}
else:
args.tag = set()
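# Example of the -t flattening above (hypothetical invocation): the command
# line `-t composer,artist -t genre` first yields ['composer,artist', 'genre'],
# which flatten_args and the comprehension turn into the set
# {'composer', 'artist', 'genre'}.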
def track_list(tracks, track_count):
# Format a list of tracks as something like 'Tracks 1, 4-6, 10', with
# special cases for 'All tracks' and a single track. The track list
# should already be sorted on entry.
if len(tracks) == track_count:
return 'All tracks'
if len(tracks) == 1:
return 'Track %d' % tracks[0]
grouped = list(zip(tracks, tracks))
pos = len(grouped)
while pos > 1:
pos -= 1
if grouped[pos][0] == grouped[pos - 1][1] + 1:
grouped[pos - 1] = (grouped[pos - 1][0], grouped[pos][1])
grouped.pop(pos)
return 'Tracks %s' % ', '.join(['%d' % x1 if x1 == x2 else '%d-%d' % (x1, x2)
for x1, x2 in grouped])
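# A few illustrative cases for track_list (values hypothetical, assuming a
# 12-track disc):
#   track_list([3], 12)                -> 'Track 3'
#   track_list([1, 4, 5, 6, 10], 12)   -> 'Tracks 1, 4-6, 10'
#   track_list(list(range(1, 13)), 12) -> 'All tracks'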
def output_dict_of_bad_tracks(track_dict, disc, method=None):
# Helper for messages which display something like:
# All tracks: message1
# Track 1: message2
# Tracks 4-7: message3
# track_dict is the dictionary mapping the messages to output as the
# key to the list of pertinent tracks as the value.
method = method or msgs.error
for message, tracks in sorted(track_dict.items(), key=lambda i: i[1]):
method(' %s: %s' % (track_list(tracks, len(disc)), message))
def check_disc_numbers(album):
# Check the disc numbers and disctotal tag to find missing discs or tracks
# with an unreasonable/inconsistent disctotal.
disctotals = defaultdict(list)
for discnum, disc in album.items():
for tracknum, track in disc.items():
try:
disctotal = flatten_tag(track['disctotal'])
except KeyError:
# Tag mapping not yet done, so check old form of tag
disctotal = flatten_tag(track.get('totaldiscs'))
try:
disctotal = int(disctotal)
except (TypeError, ValueError):
pass
disctotals[disctotal].append((discnum, tracknum))
if len(disctotals) != 1:
msgs.error('Inconsistent values of disctotal:')
for disctotal in sorted(disctotals):
msgs.error(' %s: Tracks ' % disctotal +
', '.join(['%d/%d' % x for x in sorted(disctotals[disctotal])]))
try:
disc_count = max([x for x in album if isinstance(x, int)])
except ValueError:
disc_count = 1
else:
disc_count = next(iter(disctotals.keys()))
album.disc_count = disc_count
disc_set = set(album)
expected_disc_set = set(range(1, disc_count + 1))
missing_discs = expected_disc_set - disc_set
extra_discs = disc_set - expected_disc_set
if missing_discs:
msgs.error('Missing Discs: ' + ', '.join(map(str, sorted(missing_discs))))
if extra_discs:
msgs.error('Unexpected Discs: ' + ', '.join(map(str, sorted(extra_discs))))
def check_identical_tags_across_discs(album):
# Check for tags which should be identical across all discs within a multi-disc set.
# Only checks one track per disc, since check_identical_tags will do a more exhaustive
# check for tracks within a disc.
if len(album) == 1:
return
mismatches = []
tag_values = {}
for tag in identical_tags_across_discs & album.tagset:
for discnum, disc in album.items():
if tag not in disc.tagset:
mismatches.append(tag)
break
for tracknum, track in disc.items():
if tag in track:
tag_value = track[tag]
break
else:
assert False or "Didn't find tag in any track as expected"
if tag not in tag_values:
tag_values[tag] = tag_value
else:
try:
if tag_values[tag] == tag_value:
continue
except Exception:
pass
mismatches.append(tag)
break
if mismatches:
msgs.error('Tags not identical across discs: ' + ', '.join(sorted(mismatches)))
def check_nontag_info(album):
# Make sure the expected non-FLAC files are found in the album directory.
# These are
# * folder.jpg
# * [AlbumArtist] - [Album] (Disc #).cue -- (Disc #) only for multi-disc
# * [AlbumArtist] - [Album] (Disc #).txt -- (Disc #) only for multi-disc
if not args.other:
return
def check_for_file(filename):
f = replace_reserved_chars(filename)
if not os.path.isfile(os.path.join(album.path, f)):
msgs.error("File '%s' not found" % f)
def find_tag(tag):
for disc in album.values():
for track in disc.values():
if tag in track:
return flatten_tag(track[tag])
check_for_file('folder.jpg')
album_artist = find_tag('albumartist')
album_title = find_tag('album')
if album_artist and album_title:
filename = str(album_artist + ' - ' + album_title)
if album.disc_count == 1:
check_for_file(filename + '.cue')
check_for_file(filename + '.txt')
else:
for discnum in album:
discname = '%s (Disc %s)' % (filename, discnum)
check_for_file(discname + '.cue')
check_for_file(discname + '.txt')
def handle_mapped_tags(disc):
# Check for any tags which are obsolete and mapped to newer tags.
# If old tag found and new tag not found, add new tag with old tag's value.
# If both old and new tag found, warn if they don't have the same value.
added = []
mismatch = []
for old_tag, new_tag in mapped_tags.items():
if old_tag not in disc.tagset:
continue
disc.tagset |= {new_tag}
added_tracks = []
mismatch_tracks = []
for tracknum, track in disc.items():
if new_tag not in track:
added_tracks.append(tracknum)
track[new_tag] = track[old_tag]
track.tagset |= {new_tag}
continue
try:
if track[old_tag] == track[new_tag]:
continue
except Exception:
pass
mismatch_tracks.append(tracknum)
if args.mapping and (added_tracks or mismatch_tracks):
def msg_helper(kind, tracks):
msg = ' %s %s %s in ' % (old_tag, kind, new_tag)
if len(tracks) == len(disc):
msg += 'all tracks'
else:
msg += 'tracks ' + ', '.join(map(str, sorted(tracks)))
return msg
if added_tracks:
added.append(msg_helper('->', added_tracks))
if mismatch_tracks:
mismatch.append(msg_helper('!=', mismatch_tracks))
if added:
msgs.error('Obsolete tags need updating:')
for msg in sorted(added):
msgs.error(msg)
if mismatch:
msgs.error('Obsolete and updated tags both present with different values:')
for msg in sorted(mismatch):
msgs.error(msg)
def check_profile(disc):
# Make sure the 'Classical' profile is only used for the 'Classical' genre
# Don't bother testing if the genre and profile aren't identical across tracks.
# Sets disc.classical, so run this soon after finding identical tags.
if 'genre' not in disc.identical or 'profile' not in disc.identical:
disc.classical = False
return
genre = flatten_tag(disc.identical['genre'])
profile = flatten_tag(disc.identical['profile'])
classical_genre = (genre.lower() == 'classical')
classical_profile = (profile.lower() == 'classical')
disc.classical = classical_profile
if classical_genre != classical_profile:
msgs.error("Unexpected profile '%s' for genre '%s'" % (profile, genre))
def check_inaccurate_rips(disc):
# Check for any rips that failed the AccurateRip test
inaccurate_tracks = {}
for tracknum, track in disc.items():
rip_result = flatten_tag(track.get('accurateripresult', ''))
if 'inaccurate' in rip_result.lower():
inaccurate_tracks[tracknum] = rip_result
if inaccurate_tracks:
msgs.error('AccurateRip verification failed:')
for tracknum, rip_result in inaccurate_tracks.items():
msgs.error(' Track %d: %s' % (tracknum, rip_result))
def check_missing_tags(disc):
# Check that tags which should be present are actually present in all tracks
if not args.missing:
return
desired_tags = required_classical_tags if disc.classical else required_tags
disc_missing_tags = desired_tags - disc.tagset
tracks_missing_tags = defaultdict(list)
track_desired_tags = desired_tags - disc_missing_tags
for tracknum, track in disc.items():
track_missing_tags = track_desired_tags - track.tagset
if track_missing_tags:
missing_tags_str = ', '.join(sorted(track_missing_tags))
tracks_missing_tags[missing_tags_str].append(tracknum)
if disc_missing_tags or tracks_missing_tags:
msgs.error('Missing Tags:')
if disc_missing_tags:
msgs.error(' All tracks: %s' % ', '.join(sorted(disc_missing_tags)))
if tracks_missing_tags:
output_dict_of_bad_tracks(tracks_missing_tags, disc)
def check_unknown_tags(disc):
# Check that all tags are in the known_tags dictionary
unknown_tags = disc.tagset - known_tags_set
if not unknown_tags:
return
msgs.error('Unknown Tags:')
disc_unknown_tags = unknown_tags & disc.common
if disc_unknown_tags:
msgs.error(' All tracks: %s' % ', '.join(sorted(disc_unknown_tags)))
tracks_unknown_tags_set = unknown_tags - disc_unknown_tags
if not tracks_unknown_tags_set:
return
for tracknum, track in disc.items():
track_unknown_tags = tracks_unknown_tags_set & track.tagset
if track_unknown_tags:
msgs.error(' Track #%d: %s' % (tracknum, ', '.join(sorted(track_unknown_tags))))
def check_multivalued_tags(disc):
# Check that tags with more than one value for a track are expected.
for tracknum, track in disc.items():
multivalued_tags = {tag for tag in track if len(track[tag]) > 1}
unexpected = multivalued_tags - allowed_multivalued_tags
if unexpected:
msgs.error("Track #%d: Unexpected multivalued tracks '%s'" %
(tracknum, "', '".join(sorted(unexpected))))
def check_track_numbers(disc):
# Check the track numbers to find missing tracks or tracks with unreasonable
# track numbers
track_count = None
tracktotal = flatten_tag(disc.identical.get('tracktotal'))
if tracktotal is None:
msgs.error("Can't determine last track #: tracktotal not same in all tracks")
else:
try:
track_count = int(tracktotal)
except (TypeError, ValueError):
msgs.error("Can't determine last track #: tracktotal %s not an int" % tracktotal)
track_set = set(disc)
if track_count is not None:
expected_track_set = set(range(1, track_count + 1))
else:
expected_track_set = set(range(1, max(track_set) + 1))
missing_tracks = expected_track_set - track_set
extra_tracks = track_set - expected_track_set
if missing_tracks:
msgs.error('Missing Tracks: ' + ', '.join(map(str, sorted(missing_tracks))))
if extra_tracks:
msgs.error('Unexpected Tracks: ' + ', '.join(map(str, sorted(extra_tracks))))
def check_identical_tags(disc):
# Check that tags which should be identical across all tracks are identical
mismatch_tags = (identical_tags_within_disc & disc.tagset) - disc.identical.tagset
if mismatch_tags:
msgs.error('Tags not same across all tracks: ' + ', '.join(sorted(mismatch_tags)))
def check_different_tags(disc):
# Check that tags which should be different across all tracks are different
for tag in different_tags:
if tag not in disc.tagset:
continue
tag_values = {}
need_warning = False
for tracknum, track in disc.items():
if tag in track:
tag_value = flatten_tag(track[tag])
if tag_value in tag_values:
need_warning = True
tag_values[tag_value].append(tracknum)
else:
tag_values[tag_value] = [tracknum]
if need_warning:
msgs.error("Tag '%s' duplicated in multiple tracks:" % tag)
for tag_value, tracks in tag_values.items():
if len(tracks) > 1:
msgs.error(' %s in tracks ' % tag_value + ', '.join(map(str, tracks)))
def check_sort_tags(disc):
# Check that the sorted version of tags (e.g. 'artist sort' for 'artist')
# are a reasonable match for the corresponding tag. The number of values
# in corresponding tags should match, and the sorted tags should either
# match the main tags, or be transformable by rearranging. For example,
# if the 'artist' tag has the two values 'John Smith' and 'Jane Doe', the
# 'artist sort' tag should also have two values, the first of which is
# either 'John Smith' or 'Smith, John', and the second either 'Jane Doe' or
# 'Doe, Jane'. Also warn if the sorted tag is missing on some, but not all,
# tracks (warn on missing everywhere under cmdline option).
if not args.sort_tag_mismatch:
return
for tag, sort_tag in sorted_tags.items():
if (sort_tag not in disc.tagset and
not args.no_sort_tag and
not disc.classical):
continue
missing = []
mismatch = defaultdict(list)
for tracknum, track in disc.items():
if tag not in track.tagset and sort_tag not in track.tagset:
continue # ignore if both tags not present
if sort_tag not in track.tagset:
missing.append(tracknum)
continue
tag_val = track.get(tag, [])
sort_tag_val = track[sort_tag]
if len(tag_val) == len(sort_tag_val):
# Both tags have the same number of values. Pair up the values
# and compare them.
for val1, val2 in zip(tag_val, sort_tag_val):
if val1 == val2:
continue
def canon_val(val):
# Split a value from a tag into its constituent words
# after removing commas and '[...]' comments. Get rid
# of the words 'The' or 'Los' at either the beginning or
# end of the list of words. Replace short last-name
# prefixes with their lower case version to avoid case
# differences. E.g. 'Alex de Grassi' would be sorted as
# 'De Grassi, Alex', but 'de' vs. 'De' is not a problem.
# Return the final list sorted.
val = re.sub(r'\[[^]]*\]', '', val)
words = val.replace(',', '').split()
ignored = {'The', 'Los'}
if len(words) > 1 and words[-1] in ignored:
words.pop()
if len(words) > 1 and words[0] in ignored:
words.pop(0)
force_lower = {'de', 'van'}
for index, word in enumerate(words):
word_lower = word.lower()
if word_lower in force_lower:
words[index] = word_lower
return sorted(words)
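# Illustration of canon_val (hypothetical values): 'The Beatles' and
# 'Beatles, The' both reduce to ['Beatles'], and 'Alex de Grassi' and
# 'De Grassi, Alex' both reduce to ['Alex', 'Grassi', 'de'], so such
# pairs compare equal below.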
if canon_val(val1) != canon_val(val2):
break
else:
# All paired values could be matched, track is fine
continue
# Tags either have different numbers of values, or those values
# couldn't be matched.
key = (flatten_tag(tag_val), flatten_tag(sort_tag_val))
mismatch[key].append(tracknum)
if missing or mismatch:
msgs.error("Incompatible values for tags '%s' and '%s':" % (tag, sort_tag))
errmsgs = []
if missing:
errmsgs.append((sorted(missing), "Tag '%s' not found" % sort_tag))
for vals, tracks in mismatch.items():
errmsgs.append((sorted(tracks), "'%s' versus '%s'" % vals))
for tracks, msg in sorted(errmsgs):
msgs.error(" %s: %s" % (track_list(tracks, len(disc)), msg))
def check_dups_in_tags(disc):
# Check tags with multiple values in lists, and make sure none of the
# items are duplicated within the list (e.g. composer = [Brian Eno, Brian Eno])
for tracknum, track in disc.items():
for tag, tag_value in track.items():
if len(tag_value) > 1:
tag_value_set = set(tag_value)
if len(tag_value) != len(tag_value_set):
msgs.error("Track %d has duplicate value in tag '%s': %s" %
(tracknum, tag, '; '.join(tag_value)))
def check_leading_the(disc):
# Check if the 'artist', 'albumartist', or 'composer' tags include entries
# that start with a leading 'The', e.g. 'The Beatles' instead of 'Beatles, The'.
error_items = {} # { bad tag value : (set of tags, set of tracks) }
def record_error_item(tag, tag_value, tracks):
error_value = error_items.get(tag_value)
if error_value:
error_value = (error_value[0] | {tag}, error_value[1] | tracks)
else:
error_value = ({tag}, tracks)
error_items[tag_value] = error_value
for tag in test_leading_The_tags:
if tag in disc.identical:
for tag_value in disc.identical[tag]:
if tag_value[0:4].lower() == 'the ':
record_error_item(tag, tag_value, set(disc))
else:
for tracknum in disc:
for tag_value in disc[tracknum].get(tag, ''):
if tag_value[0:4].lower() == 'the ':
record_error_item(tag, tag_value, {tracknum})
for tag_value, error_tuple in error_items.items():
fmt_tracks = track_list(sorted(error_tuple[1]), len(disc)).lower()
msgs.error("'%s' should be '%s': tag%s '%s' in %s" %
(tag_value, ', '.join((tag_value[4:], tag_value[0:3])),
's' if len(error_tuple[0]) != 1 else '',
"', '".join(sorted(error_tuple[0])),
fmt_tracks))
def check_multiple_artists(disc):
# If the 'artist' tag isn't identical across tracks and the 'albumartist'
# tag isn't found in each track's 'artist' tag, then make sure the
# 'albumartist' tag is either 'Soundtrack', 'Various Artists', or 'TV Theme'
# depending on the 'genre' tag. Not done for classical profile, or if the
# tag 'NoMultipleArtistTest' is found in all tracks.
if (disc.classical or
'artist' in disc.identical or
'albumartist' not in disc.identical or
'genre' not in disc.identical or
'nomultipleartisttest' in disc.common):
return
album_artist = flatten_tag(disc.identical['albumartist'])
album_artist_low = album_artist.lower()
genre = flatten_tag(disc.identical['genre']).lower()
for tracknum, track in disc.items():
artists = flatten_tag(track['artist'])
if album_artist_low not in artists.lower():
expected = ['Soundtrack'] if genre == 'soundtrack' else ['Various Artists', 'TV Theme']
if album_artist_low not in [x.lower() for x in expected]:
msgs.error("AlbumArtist should be '%s', not '%s'" %
("' or '".join(expected), album_artist))
break
def check_compilation(disc):
# Run checks for compilations:
# * If profile is 'Classical', make sure the 'composer' tag is not identical across
# all tracks.
# * If genre is 'Soundtrack', make sure the AlbumArtist is also 'Soundtrack'
# * For other genres, make sure the AlbumArtist is either 'Various Artists' or
# 'TV Theme'
# The test is ignored if a tag 'NoCompilationTest' exists in all tracks.
if 'compilation' not in disc.tagset:
return
if 'nocompilationtest' in disc.common:
return
if disc.classical:
if 'composer' in disc.identical:
msgs.error("For classical compilation, Composer should not be '%s' for all tracks" %
flatten_tag(disc.identical['composer']))
return
if 'albumartist' not in disc.identical or 'genre' not in disc.identical:
return
album_artist = flatten_tag(disc.identical['albumartist'])
album_artist_low = album_artist.lower()
genre = flatten_tag(disc.identical['genre']).lower()
if genre == 'soundtrack':
if album_artist_low != 'soundtrack':
msgs.error("For soundtrack compilation, AlbumArtist should be 'Soundtrack', not '%s'" %
album_artist)
else:
if album_artist_low not in ['various artists', 'tv theme']:
msgs.error("For this compilation, AlbumArtist should be 'Various Artists', not '%s'" %
album_artist)
def check_orchestra(disc):
# For classical discs, make sure there's an orchestra tag if the conductor
# tag exists. Also make sure there's an orchestra tag if it looks like the
# album artist or artist tags name an orchestra.
if not disc.classical:
return
if 'conductor' in disc.tagset:
no_orchestra = []
for tracknum, track in disc.items():
if 'conductor' in track and 'orchestra' not in track:
no_orchestra.append(tracknum)
if no_orchestra:
msgs.error("Tag 'conductor' but no tag 'orchestra': %s" % track_list(no_orchestra, len(disc)))
# Look for artist names that imply an orchestra, verify the orchestra tag
# exists if found.
bad_tracks = defaultdict(list)
names = ('orchestra', 'symphon', 'philharmon', 'sinfoni')
for tracknum, track in disc.items():
artist = flatten_tag(track.get('artist', ''))
artist_low = artist.lower()
for name in names:
if name in artist_low:
break
else:
continue
if 'orchestra' not in track.tagset:
bad_tracks[artist].append(tracknum)
if bad_tracks:
msgs.error("Artist tag implies an orchestra, but no 'orchestra' tag found:")
output_dict_of_bad_tracks(bad_tracks, disc)
def find_selected_tags(disc):
# Not a correctness check - display any tracks using the selected tags.
for tag in sorted(args.tag & disc.tagset):
msgs.note("Tag '%s' found:" % tag)
tag_vals = defaultdict(list)
for tracknum, track in disc.items():
if tag in track:
tag_vals[flatten_tag(track[tag])].append(tracknum)
output_dict_of_bad_tracks(tag_vals, disc, msgs.note)
def process_album(album_path):
global msgs, album_count, disc_count, track_count, warn_count
warn = False
album_count += 1
album, msgs = get_album(album_path)
if album:
check_disc_numbers(album)
check_identical_tags_across_discs(album)
check_nontag_info(album)
if msgs:
print("\nEarly checks of '%s' found problems:" % album_path)
print(msgs)
for discnum, disc in album.items():
msgs.clear()
disc_count += 1
track_count += len(disc)
handle_mapped_tags(disc)
find_common_disc_tags(disc)
find_identical_disc_tags(disc)
check_profile(disc)
check_inaccurate_rips(disc)
check_missing_tags(disc)
check_unknown_tags(disc)
check_multivalued_tags(disc)
check_track_numbers(disc)
check_identical_tags(disc)
check_different_tags(disc)
check_dups_in_tags(disc)
check_sort_tags(disc)
check_leading_the(disc)
check_multiple_artists(disc)
check_compilation(disc)
check_orchestra(disc)
find_selected_tags(disc)
if msgs or args.verbose:
album_display = album_path
try:
if int(flatten_tag(next(iter(disc.values())).get("disctotal", '1'))) != 1:
album_display += ' (Disc %d)' % discnum
except ValueError:
pass
print("\nChecking '%s'" % album_display)
if msgs:
print(msgs)
if msgs.errors or msgs.warnings:
warn_count += 1
def main():
parse_args()
for root in sorted(args.path):
for album_path in find_albums(root):
process_album(album_path)
def plural(count, name, zero='0'):
if count == 1:
return '1 ' + name
elif count == 0:
return '%s %ss' % (zero, name)
else:
return '%d %ss' % (count, name)
print("\nProcessed %s, %s, %s - %s with issues" %
(plural(album_count, 'album'), plural(disc_count, 'disc'),
plural(track_count, 'track'), plural(warn_count, 'album', zero='No')))
if args.pause:
try:
input('\nPress Enter when ready...')
except (EOFError, KeyboardInterrupt):
pass
if __name__ == '__main__':
main()
|
plucid/dBpa-scripts
|
CheckFlacTags.py
|
Python
|
mit
| 34,767
|
[
"Brian"
] |
757b643d020218c05268bae633841384f92ab66f7791b60a2e8cdea75736da07
|
'''
plot.py
Functions to plot xvg data generated by gromacs "gmx energy" package
'''
from sys import argv
import seaborn as sns
import matplotlib.pyplot as plt
xvg = "/home/antonio/Docbox/gromacs_automatizer/test/potential.xvg"
#xvg = argv[1]
#----| BASIC FUNCTIONS |----#
def load2dXVG(xvg_name):
'''
Get data from a simple 2d XVG file.
:return: X values, Y values, plot title, x-axis label, y-axis label
'''
f = open(xvg_name, "r")
title = ""
xlabel = ""
ylabel = ""
X = []
Y = []
for line in f:
if line.startswith("@ title"):
title = line[10:-1].replace('"','')
continue
if line.startswith("@ xaxis label"):
xlabel = line[18:-1].replace('"','')
continue
if line.startswith("@ yaxis label"):
ylabel = line[18:-1].replace('"',"")
continue
if line.startswith("#") or line.startswith("@"):
continue
else:
line_split = line.split(" ")
line_data = []
for each in line_split:
if each == '':
continue
else:
line_data.append(each)
X.append(float(line_data[0]))
Y.append(float(line_data[1].replace('\n', '')))
f.close()
return X, Y, title, xlabel, ylabel
def plot_xvg(X, Y, xlabel, ylabel, title, label):
'''
Plot line graph for 2d data
:param X:
:param Y:
:param xlabel:
:param ylabel:
:param title:
:return:
'''
plt.plot(X, Y, label=label)
plt.legend()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
# Save before show(): with most interactive backends show() leaves an empty
# canvas behind, so a later savefig() writes a blank image. Note the output
# file name comes from the module-level `xvg` path, not from this call.
plt.savefig(xvg + ".png", dpi=300)
plt.show()
plt.close()
#---------------------#
def Plot2DXVG(xvg_name, label):
'''
Load a 2d XVG file and plot it as a labelled line graph.
:param xvg_name: path to the .xvg file
:param label: legend label for the plotted series
:return:
'''
X, Y, title, xlabel, ylabel = load2dXVG(xvg_name)
plot_xvg(X=X, Y=Y, xlabel=xlabel, ylabel=ylabel, title=title, label=label)
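# Minimal usage sketch, assuming the module-level `xvg` path above points at
# an existing "gmx energy" output file:
if __name__ == '__main__':
    Plot2DXVG(xvg, label="potential")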
|
gustalima/gromacs_automatizer
|
plot.py
|
Python
|
gpl-3.0
| 1,942
|
[
"Gromacs"
] |
cb668863ed73bdbee3c6cabff877fb888b9ec20c56688011e0c5dc56e8810485
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2015, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ipify_facts
short_description: Retrieve the public IP of your internet gateway
description:
- Useful if you are behind NAT and need to know the public IP of your internet gateway.
version_added: '2.0'
author:
- René Moser (@resmo)
options:
api_url:
description:
- URL of the ipify.org API service.
- C(?format=json) will be appended by default.
type: str
default: https://api.ipify.org/
timeout:
description:
- HTTP connection timeout in seconds.
type: int
default: 10
version_added: "2.3"
validate_certs:
description:
- When set to C(NO), SSL certificates will not be validated.
type: bool
default: yes
version_added: "2.4"
notes:
- Visit https://www.ipify.org to get more information.
'''
EXAMPLES = r'''
# Gather IP facts from ipify.org
- name: Get my public IP
ipify_facts:
# Gather IP facts from your own ipify service endpoint with a custom timeout
- name: Get my public IP
ipify_facts:
api_url: http://api.example.com/ipify
timeout: 20
'''
RETURN = r'''
---
ipify_public_ip:
description: Public IP of the internet gateway.
returned: success
type: str
sample: 1.2.3.4
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_text
class IpifyFacts(object):
def __init__(self):
self.api_url = module.params.get('api_url')
self.timeout = module.params.get('timeout')
def run(self):
result = {
'ipify_public_ip': None
}
(response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout)
if not response:
module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout))
data = json.loads(to_text(response.read()))
result['ipify_public_ip'] = data.get('ip')
return result
def main():
global module
module = AnsibleModule(
argument_spec=dict(
api_url=dict(type='str', default='https://api.ipify.org/'),
timeout=dict(type='int', default=10),
validate_certs=dict(type='bool', default=True),
),
supports_check_mode=True,
)
ipify_facts = IpifyFacts().run()
ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts)
module.exit_json(**ipify_facts_result)
if __name__ == '__main__':
main()
|
alxgu/ansible
|
lib/ansible/modules/net_tools/ipify_facts.py
|
Python
|
gpl-3.0
| 2,944
|
[
"VisIt"
] |
7e7a42910ab6ee5a4bedcc3197453920046c20a21ed70a270395c16eb81a411f
|
"""
parser.http.movieParser module (imdb package).
This module provides the classes (and the instances), used to parse the
IMDb pages on the www.imdb.com server about a movie.
E.g., for Brian De Palma's "The Untouchables", the referred
pages would be:
combined details: http://www.imdb.com/title/tt0094226/reference
plot summary: http://www.imdb.com/title/tt0094226/plotsummary
...and so on...
Copyright 2004-2018 Davide Alberani <da@erlug.linux.it>
2008-2018 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import functools
import re
import urllib.error
import urllib.parse
import urllib.request
from imdb import imdbURL_base
from imdb.Company import Company
from imdb.Movie import Movie
from imdb.Person import Person
from imdb.utils import _Container, KIND_MAP
from .piculet import Path, Rule, Rules, preprocessors
from .utils import DOMParserBase, analyze_imdbid, build_person
# Dictionary used to convert some section's names.
_SECT_CONV = {
'directed': 'director',
'directed by': 'director',
'directors': 'director',
'editors': 'editor',
'writing credits': 'writer',
'writers': 'writer',
'produced': 'producer',
'cinematography': 'cinematographer',
'film editing': 'editor',
'casting': 'casting director',
'costume design': 'costume designer',
'makeup department': 'make up',
'production management': 'production manager',
'second unit director or assistant director': 'assistant director',
'costume and wardrobe department': 'costume department',
'sound department': 'sound crew',
'stunts': 'stunt performer',
'other crew': 'miscellaneous crew',
'also known as': 'akas',
'country': 'countries',
'runtime': 'runtimes',
'language': 'languages',
'certification': 'certificates',
'genre': 'genres',
'created': 'creator',
'creators': 'creator',
'color': 'color info',
'plot': 'plot outline',
'art directors': 'art direction',
'assistant directors': 'assistant director',
'set decorators': 'set decoration',
'visual effects department': 'visual effects',
'miscellaneous': 'miscellaneous crew',
'make up department': 'make up',
'plot summary': 'plot outline',
'cinematographers': 'cinematographer',
'camera department': 'camera and electrical department',
'costume designers': 'costume designer',
'production designers': 'production design',
'production managers': 'production manager',
'music original': 'original music',
'casting directors': 'casting director',
'other companies': 'miscellaneous companies',
'producers': 'producer',
'special effects by': 'special effects department',
'special effects': 'special effects companies'
}
def _manageRoles(mo):
"""Perform some transformation on the html, so that roleIDs can
be easily retrieved."""
firstHalf = mo.group(1)
secondHalf = mo.group(2)
newRoles = []
roles = secondHalf.split(' / ')
for role in roles:
role = role.strip()
if not role:
continue
roleID = analyze_imdbid(role)
if roleID is None:
roleID = '/'
else:
roleID += '/'
newRoles.append('<div class="_imdbpyrole" roleid="%s">%s</div>' % (
roleID, role.strip()
))
return firstHalf + ' / '.join(newRoles) + mo.group(3)
_reRolesMovie = re.compile(r'(<td class="character">)(.*?)(</td>)', re.I | re.M | re.S)
def _replaceBR(mo):
"""Replaces <br> tags with '::' (useful for some akas)"""
txt = mo.group(0)
return txt.replace('<br>', '::')
_reAkas = re.compile(r'<h5>also known as:</h5>.*?</div>', re.I | re.M | re.S)
def makeSplitter(lstrip=None, sep='|', comments=True,
origNotesSep=' (', newNotesSep='::(', strip=None):
"""Return a splitter function suitable for a given set of data."""
def splitter(x):
if not x:
return x
x = x.strip()
if not x:
return x
if lstrip is not None:
x = x.lstrip(lstrip).lstrip()
lx = x.split(sep)
lx[:] = [_f for _f in [j.strip() for j in lx] if _f]
if comments:
lx[:] = [j.replace(origNotesSep, newNotesSep, 1) for j in lx]
if strip:
lx[:] = [j.strip(strip) for j in lx]
return lx
return splitter
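# Example (hypothetical input): with the defaults,
#   makeSplitter()('PG | R (director cut)')
# returns ['PG', 'R::(director cut)'] -- values split on '|', stripped, and
# the ' (' note separator rewritten to '::('.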
def _toInt(val, replace=()):
"""Return the value, converted to integer, or None; if present, 'replace'
must be a list of tuples of values to replace."""
for before, after in replace:
val = val.replace(before, after)
try:
return int(val)
except (TypeError, ValueError):
return None
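# Examples (hypothetical values):
#   _toInt('1,234', [(',', '')])  -> 1234
#   _toInt('n/a')                 -> None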
_re_og_title = re.compile(
r'(.*) \((?:(?:(.+)(?= ))? ?(\d{4})(?:(–)(\d{4}| ))?|(.+))\)',
re.UNICODE
)
def analyze_og_title(og_title):
data = {}
match = _re_og_title.match(og_title)
if match:
data['title'] = match.group(1)
if match.group(3):
data['year'] = int(match.group(3))
kind = match.group(2) or match.group(6)
if kind is None:
kind = 'movie'
else:
kind = kind.lower()
kind = KIND_MAP.get(kind, kind)
data['kind'] = kind
year_separator = match.group(4)
# There is a year separator so assume an ongoing or ended series
if year_separator is not None:
end_year = match.group(5)
if end_year is not None:
data['series years'] = '%(year)d-%(end_year)s' % {
'year': data['year'],
'end_year': end_year.strip(),
}
elif kind.endswith('series'):
data['series years'] = '%(year)d-' % {'year': data['year']}
# No year separator and series, so assume that it ended the same year
elif kind.endswith('series') and 'year' in data:
data['series years'] = '%(year)d-%(year)d' % {'year': data['year']}
if data['kind'] == 'episode' and data['title'][0] == '"':
quote_end = data['title'].find('"', 1)
data['tv series title'] = data['title'][1:quote_end]
data['title'] = data['title'][quote_end + 1:].strip()
return data
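# Example (hypothetical og:title content):
#   analyze_og_title('The Untouchables (1987)')
# returns {'title': 'The Untouchables', 'year': 1987, 'kind': 'movie'};
# series-style titles additionally populate 'series years'.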
def analyze_certificates(certificates):
def reducer(acc, el):
cert_re = re.compile(r'^(.+):(.+)$', re.UNICODE)
if cert_re.match(el):
acc.append(el)
elif acc:
acc[-1] = u'{}::{}'.format(
acc[-1],
el,
)
return acc
certificates = [el.strip() for el in certificates.split('\n') if el.strip()]
return functools.reduce(reducer, certificates, [])
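# Example (hypothetical input): lines that don't look like 'Country:Rating'
# are folded into the previous certificate with a '::' separator, so
#   analyze_certificates('USA:PG\nUK:12\nre-rating')
# returns ['USA:PG', 'UK:12::re-rating'].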
class DOMHTMLMovieParser(DOMParserBase):
"""Parser for the "combined details" (and if instance.mdparse is
True also for the "main details") page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
mparser = DOMHTMLMovieParser()
result = mparser.parse(combined_details_html_string)
"""
_containsObjects = True
rules = [
Rule(
key='title',
extractor=Path('//meta[@property="og:title"]/@content',
transform=analyze_og_title)
),
# parser for misc sections like 'casting department', 'stunts', ...
Rule(
key='misc sections',
extractor=Rules(
foreach='//h4[contains(@class, "ipl-header__content")]',
rules=[
Rule(
key=Path('./@name', transform=lambda x: x.replace('_', ' ')),
extractor=Rules(
foreach='../../following-sibling::table[1]//tr',
rules=[
Rule(
key='person',
extractor=Path('.//text()')
),
Rule(
key='link',
extractor=Path('./td[1]/a[@href]/@href')
)
],
transform=lambda x: build_person(
x.get('person') or '',
personID=analyze_imdbid(x.get('link'))
)
)
)
]
)
),
Rule(
key='cast',
extractor=Rules(
foreach='//table[@class="cast_list"]//tr',
rules=[
Rule(
key='person',
extractor=Path('.//text()')
),
Rule(
key='link',
extractor=Path('./td[2]/a/@href')
),
Rule(
key='roleID',
extractor=Path('./td[4]//div[@class="_imdbpyrole"]/@roleid')
)
],
transform=lambda x: build_person(
x.get('person') or '',
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID') or '').split('/')
)
)
),
Rule(
key='myrating',
extractor=Path('//span[@id="voteuser"]//text()')
),
Rule(
key='plot summary',
extractor=Path('//td[starts-with(text(), "Plot")]/..//p/text()',
transform=lambda x: x.strip().rstrip('|').rstrip())
),
Rule(
key='genres',
extractor=Path(
foreach='//td[starts-with(text(), "Genre")]/..//li/a',
path='./text()'
)
),
Rule(
key='runtimes',
extractor=Path(
foreach='//td[starts-with(text(), "Runtime")]/..//li',
path='./text()',
transform=lambda x: x.strip().replace(' min', '')
)
),
Rule(
key='countries',
extractor=Path(
foreach='//td[starts-with(text(), "Countr")]/..//li/a',
path='./text()'
)
),
Rule(
key='country codes',
extractor=Path(
foreach='//td[starts-with(text(), "Countr")]/..//li/a',
path='./@href',
transform=lambda x: x.split('/')[2].strip().lower()
)
),
Rule(
key='language',
extractor=Path(
foreach='//td[starts-with(text(), "Language")]/..//li/a',
path='./text()'
)
),
Rule(
key='language codes',
extractor=Path(
foreach='//td[starts-with(text(), "Language")]/..//li/a',
path='./@href',
transform=lambda x: x.split('/')[2].strip()
)
),
Rule(
key='color info',
extractor=Path(
foreach='//td[starts-with(text(), "Color")]/..//li/a',
path='./text()',
transform=lambda x: x.replace(' (', '::(')
)
),
Rule(
key='aspect ratio',
extractor=Path(
'//td[starts-with(text(), "Aspect")]/..//li/text()',
transform=str.strip
)
),
Rule(
key='sound mix',
extractor=Path(
foreach='//td[starts-with(text(), "Sound Mix")]/..//li/a',
path='./text()',
transform=lambda x: x.replace(' (', '::(')
)
),
Rule(
key='certificates',
extractor=Path(
'//td[starts-with(text(), "Certificat")]/..//text()',
transform=analyze_certificates
)
),
# Collects akas not enclosed in <i> tags.
Rule(
key='other akas',
extractor=Path(
'//section[contains(@class, "listo")]'
'//td[starts-with(text(), "Also Known As")]/..//ul//text()',
transform=makeSplitter(
sep='::', origNotesSep='" - ', newNotesSep='::', strip='"'
)
)
),
Rule(
key='creator',
extractor=Rules(
foreach='//td[starts-with(text(), "Creator")]/..//a',
rules=[
Rule(
key='name',
extractor=Path('./text()')
),
Rule(
key='link',
extractor=Path('./@href')
)
],
transform=lambda x: build_person(
x.get('name') or '',
personID=analyze_imdbid(x.get('link'))
)
)
),
Rule(
key='thin writer',
extractor=Rules(
foreach='//div[starts-with(normalize-space(text()), "Writer")]/ul/li[1]/a',
rules=[
Rule(
key='name',
extractor=Path('./text()')
),
Rule(
key='link',
extractor=Path('./@href')
)
],
transform=lambda x: build_person(
x.get('name') or '',
personID=analyze_imdbid(x.get('link'))
)
)
),
Rule(
key='thin director',
extractor=Rules(
foreach='//div[starts-with(normalize-space(text()), "Director")]/ul/li[1]/a',
rules=[
Rule(
key='name',
extractor=Path('./text()')
),
Rule(
key='link',
extractor=Path('./@href')
)
],
transform=lambda x: build_person(
x.get('name') or '',
personID=analyze_imdbid(x.get('link'))
)
)
),
Rule(
key='top/bottom rank',
extractor=Path(
'//li[@class="ipl-inline-list__item"]//a[starts-with(@href, "/chart/")]/text()'
)
),
Rule(
key='original air date',
extractor=Path('//span[@imdbpy="airdate"]/text()')
),
Rule(
key='series years',
extractor=Path(
'//div[@id="tn15title"]//span[starts-with(text(), "TV series")]/text()',
transform=lambda x: x.replace('TV series', '').strip()
)
),
Rule(
key='season/episode',
extractor=Path(
'//div[@class="titlereference-overview-season-episode-section"]/ul//text()',
transform=str.strip
)
),
Rule(
key='number of episodes',
extractor=Path(
'//a[starts-with(text(), "All Episodes")]/text()',
transform=lambda x: int(x.replace('All Episodes', '').strip()[1:-1])
)
),
Rule(
key='episode number',
extractor=Path(
'//div[@id="tn15epnav"]/text()',
transform=lambda x: int(re.sub(r'[^a-z0-9 ]', '',
x.lower()).strip().split()[0]))
),
Rule(
key='previous episode',
extractor=Path(
'//span[@class="titlereference-overview-episodes-links"]'
'//a[contains(text(), "Previous")]/@href',
transform=analyze_imdbid
)
),
Rule(
key='next episode',
extractor=Path(
'//span[@class="titlereference-overview-episodes-links"]'
'//a[contains(text(), "Next")]/@href',
transform=analyze_imdbid
)
),
Rule(
key='number of seasons',
extractor=Path(
'//span[@class="titlereference-overview-years-links"]/../a[1]/text()',
transform=int
)
),
Rule(
key='tv series link',
extractor=Path('//a[starts-with(text(), "All Episodes")]/@href')
),
Rule(
key='akas',
extractor=Path(
foreach='//i[@class="transl"]',
path='./text()',
transform=lambda x: x
.replace('\xa0', ' ')  # assumed NBSP: a literal space-for-space replace is a no-op
.rstrip('-')
.replace('" - ', '"::', 1)
.strip('"')
.replace('\xa0', ' ')
),
Rule(
key='production status',
extractor=Path(
'//td[starts-with(text(), "Status:")]/..//div[@class="info-content"]//text()',
transform=lambda x: x.strip().split('|')[0].strip().lower()
)
),
Rule(
key='production status updated',
extractor=Path(
'//td[starts-with(text(), "Status Updated:")]/'
'..//div[@class="info-content"]//text()',
transform=str.strip
)
),
Rule(
key='production comments',
extractor=Path(
'//td[starts-with(text(), "Comments:")]/'
'..//div[@class="info-content"]//text()',
transform=str.strip
)
),
Rule(
key='production note',
extractor=Path(
'//td[starts-with(text(), "Note:")]/'
'..//div[@class="info-content"]//text()',
transform=str.strip
)
),
Rule(
key='blackcatheader',
extractor=Rules(
foreach='//b[@class="blackcatheader"]',
rules=[
Rule(
key=Path('./text()', transform=str.lower),
extractor=Rules(
foreach='../ul/li',
rules=[
Rule(
key='name',
extractor=Path('./a//text()')
),
Rule(
key='comp-link',
extractor=Path('./a/@href')
),
Rule(
key='notes',
extractor=Path('./text()')
)
],
transform=lambda x: Company(
name=x.get('name') or '',
companyID=analyze_imdbid(x.get('comp-link')),
notes=(x.get('notes') or '').strip()
)
)
)
]
)
),
Rule(
key='rating',
extractor=Path('(//span[@class="ipl-rating-star__rating"])[1]/text()')
),
Rule(
key='votes',
extractor=Path('//span[@class="ipl-rating-star__total-votes"][1]/text()')
),
Rule(
key='cover url',
extractor=Path('//img[@alt="Poster"]/@src')
)
]
preprocessors = [
('/releaseinfo">', '"><span imdbpy="airdate">'),
(re.compile(r'(<b class="blackcatheader">.+?</b>)', re.I), r'</div><div>\1'),
('<small>Full cast and crew for<br>', ''),
('<td> </td>', '<td>...</td>'),
(re.compile(r'<span class="tv-extra">TV mini-series(\s+.*?)</span>', re.I),
r'<span class="tv-extra">TV series\1</span> (mini)'),
(_reRolesMovie, _manageRoles),
(_reAkas, _replaceBR)
]
def preprocess_dom(self, dom):
# Handle series information.
xpath = self.xpath(dom, "//b[text()='Series Crew']")
if xpath:
b = xpath[-1] # In doubt, take the last one.
for a in self.xpath(b, "./following::h5/a[@class='glossary']"):
name = a.get('name')
if name:
a.set('name', 'series %s' % name)
# Remove links to IMDbPro.
preprocessors.remove(dom, '//span[@class="pro-link"]')
# Remove some 'more' links (keep others, like the one around
# the number of votes).
preprocessors.remove(dom, '//a[@class="tn15more"][starts-with(@href, "/title/")]')
return dom
re_space = re.compile(r'\s+')
re_airdate = re.compile(r'(.*)\s*\(season (\d+), episode (\d+)\)', re.I)
def postprocess_data(self, data):
# Convert section names.
for sect in list(data.keys()):
if sect in _SECT_CONV:
data[_SECT_CONV[sect]] = data[sect]
del data[sect]
sect = _SECT_CONV[sect]
# Filter out fake values.
for key in data:
value = data[key]
if isinstance(value, list) and value:
if isinstance(value[0], Person):
data[key] = [x for x in value if x.personID is not None]
if isinstance(value[0], _Container):
for obj in data[key]:
obj.accessSystem = self._as
obj.modFunct = self._modFunct
for key in ['title']:
if (key in data) and isinstance(data[key], dict):
subdata = data[key]
del data[key]
data.update(subdata)
misc_sections = data.get('misc sections')
if misc_sections is not None:
for section in misc_sections:
data.update(section)
del data['misc sections']
if 'akas' in data or 'other akas' in data:
akas = data.get('akas') or []
other_akas = data.get('other akas') or []
akas += other_akas
nakas = []
for aka in akas:
aka = aka.strip()
if aka.endswith('" -'):
aka = aka[:-3].rstrip()
nakas.append(aka)
if 'akas' in data:
del data['akas']
if 'other akas' in data:
del data['other akas']
if nakas:
data['akas'] = nakas
if 'runtimes' in data:
data['runtimes'] = [x.replace(' min', '')
for x in data['runtimes']]
if 'number of seasons' in data:
data['seasons'] = [str(i) for i in range(1, data['number of seasons'] + 1)]
# data['number of seasons'] = seasons[-1] if seasons else len(data['seasons'])
if 'season/episode' in data:
tokens = data['season/episode'].split('Episode')
try:
data['season'] = int(tokens[0].split('Season')[1])
except (IndexError, ValueError):
data['season'] = 'unknown'
try:
data['episode'] = int(tokens[1])
except (IndexError, ValueError):
data['episode'] = 'unknown'
del data['season/episode']
# if 'original air date' in data:
# oid = self.re_space.sub(' ', data['original air date']).strip()
# data['original air date'] = oid
# aid = self.re_airdate.findall(oid)
# if aid and len(aid[0]) == 3:
# date, season, episode = aid[0]
# date = date.strip()
# try:
# season = int(season)
# except ValueError:
# pass
# try:
# episode = int(episode)
# except ValueError:
# pass
# if date and date != '????':
# data['original air date'] = date
# else:
# del data['original air date']
# # Handle also "episode 0".
# if season or isinstance(season, int):
# data['season'] = season
# if episode or isinstance(season, int):
# data['episode'] = episode
for k in ('writer', 'director'):
t_k = 'thin %s' % k
if t_k not in data:
continue
if k not in data:
data[k] = data[t_k]
del data[t_k]
if 'top/bottom rank' in data:
tbVal = data['top/bottom rank'].lower()
if tbVal.startswith('top'):
tbKey = 'top 250 rank'
tbVal = _toInt(tbVal, [('top rated movies: #', '')])
else:
tbKey = 'bottom 100 rank'
tbVal = _toInt(tbVal, [('bottom rated movies: #', '')])
if tbVal:
data[tbKey] = tbVal
del data['top/bottom rank']
if 'year' in data and data['year'] == '????':
del data['year']
if 'tv series link' in data:
if 'tv series title' in data:
data['episode of'] = Movie(title=data['tv series title'],
movieID=analyze_imdbid(data['tv series link']),
accessSystem=self._as,
modFunct=self._modFunct)
data['episode of']['kind'] = 'tv series'
del data['tv series title']
del data['tv series link']
if 'rating' in data:
try:
data['rating'] = float(data['rating'].replace('/10', ''))
except (TypeError, ValueError):
pass
if data['rating'] == 0:
del data['rating']
if 'votes' in data:
try:
votes = data['votes'].replace('(', '').replace(')', '').replace(',', '').replace('votes', '')
data['votes'] = int(votes)
except (TypeError, ValueError):
pass
return data
def _process_plotsummary(x):
"""Process a plot (contributed by Rdian06)."""
xauthor = x.get('author')
xplot = x.get('plot', '').strip()
if xauthor:
xplot += '::%s' % xauthor
return xplot
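# Example (hypothetical extracted values):
#   _process_plotsummary({'plot': 'A cop fights crime.', 'author': 'Rdian06'})
# returns 'A cop fights crime.::Rdian06'.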
class DOMHTMLPlotParser(DOMParserBase):
"""Parser for the "plot summary" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a 'plot' key, containing a list
of string with the structure: 'summary::summary_author <author@email>'.
Example:
pparser = DOMHTMLPlotParser()
result = pparser.parse(plot_summary_html_string)
"""
_defGetRefs = True
# Notice that recently IMDb started to put the email of the
# author only in the link, that we're not collecting, here.
rules = [
Rule(
key='plot',
extractor=Rules(
foreach='//ul[@id="plot-summaries-content"]/li',
rules=[
Rule(
key='plot',
extractor=Path('./p//text()')
),
Rule(
key='author',
extractor=Path('.//div[@class="author-container"]//a/text()')
)
],
transform=_process_plotsummary
)
),
Rule(
key='synopsis',
extractor=Path(
foreach='//ul[@id="plot-synopsis-content"]',
path='.//li//text()'
)
)
]
def preprocess_dom(self, dom):
preprocessors.remove(dom, '//li[@id="no-summary-content"]')
return dom
def postprocess_data(self, data):
if 'synopsis' in data and data['synopsis'][0] and 'a Synopsis for this title' in data['synopsis'][0]:
del data['synopsis']
return data
def _process_award(x):
award = {}
_award = x.get('award')
if _award is not None:
_award = _award.strip()
award['award'] = _award
if not award['award']:
return {}
award['year'] = (x.get('year') or '').strip()
if award['year'] and award['year'].isdigit():
award['year'] = int(award['year'])
award['result'] = (x.get('result') or '').strip()
category = (x.get('category') or '').strip()
if category:
award['category'] = category
received_with = x.get('with')
if received_with is not None:
award['with'] = received_with.strip()
notes = x.get('notes')
if notes is not None:
notes = notes.strip()
if notes:
award['notes'] = notes
award['anchor'] = x.get('anchor')
return award
class DOMHTMLAwardsParser(DOMParserBase):
"""Parser for the "awards" page of a given person or movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
awparser = DOMHTMLAwardsParser()
result = awparser.parse(awards_html_string)
"""
subject = 'title'
_containsObjects = True
rules = [
Rule(
key='awards',
extractor=Rules(
foreach='//table//big',
rules=[
Rule(
key=Path('./a'),
extractor=Rules(
foreach='./ancestor::tr[1]/following-sibling::tr/td[last()][not(@colspan)]',
rules=[
Rule(
key='year',
extractor=Path('./td[1]/a/text()')
),
Rule(
key='result',
extractor=Path('../td[2]/b/text()')
),
Rule(
key='award',
extractor=Path('./td[3]/text()')
),
Rule(
key='category',
extractor=Path('./text()[1]')
),
Rule(
key='with',
extractor=Path(
'./small[starts-with(text(), "Shared with:")]/'
'following-sibling::a[1]/text()'
)
),
Rule(
key='notes',
extractor=Path('./small[last()]//text()')
),
Rule(
key='anchor',
extractor=Path('.//text()')
)
],
transform=_process_award
)
)
]
)
),
Rule(
key='recipients',
extractor=Rules(
foreach='//table//big',
rules=[
Rule(
key=Path('./a'),
extractor=Rules(
foreach='./ancestor::tr[1]/following-sibling::tr'
'/td[last()]/small[1]/preceding-sibling::a',
rules=[
Rule(
key='name',
extractor=Path('./text()')
),
Rule(
key='link',
extractor=Path('./@href')
),
Rule(
key='anchor',
extractor=Path('..//text()')
)
]
)
)
]
)
)
]
preprocessors = [
(re.compile('(<tr><td[^>]*>.*?</td></tr>\n\n</table>)', re.I),
r'\1</table>'),
(re.compile('(<tr><td[^>]*>\n\n<big>.*?</big></td></tr>)', re.I),
r'</table><table class="_imdbpy">\1'),
(re.compile('(<table[^>]*>\n\n)</table>(<table)', re.I), r'\1\2'),
(re.compile('(<small>.*?)<br>(.*?</small)', re.I), r'\1 \2'),
(re.compile('(</tr>\n\n)(<td)', re.I), r'\1<tr>\2')
]
def preprocess_dom(self, dom):
"""Repeat td elements according to their rowspan attributes
in subsequent tr elements.
"""
cols = self.xpath(dom, "//td[@rowspan]")
for col in cols:
span = int(col.get('rowspan'))
del col.attrib['rowspan']
position = len(self.xpath(col, "./preceding-sibling::td"))
row = col.getparent()
for tr in self.xpath(row, "./following-sibling::tr")[:span - 1]:
# if not cloned, child will be moved to new parent
clone = self.clone(col)
tr.insert(position, clone)
return dom
def postprocess_data(self, data):
if len(data) == 0:
return {}
nd = []
for key in list(data.keys()):
dom = self.get_dom(key)
assigner = self.xpath(dom, "//a/text()")[0]
for entry in data[key]:
if 'name' not in entry:
if not entry:
continue
# this is an award, not a recipient
entry['assigner'] = assigner.strip()
# find the recipients
matches = [p for p in data[key]
if 'name' in p and (entry['anchor'] == p['anchor'])]
if self.subject == 'title':
recipients = [
Person(name=recipient['name'],
personID=analyze_imdbid(recipient['link']))
for recipient in matches
]
entry['to'] = recipients
elif self.subject == 'name':
recipients = [
Movie(title=recipient['name'],
movieID=analyze_imdbid(recipient['link']))
for recipient in matches
]
entry['for'] = recipients
nd.append(entry)
del entry['anchor']
return {'awards': nd}
class DOMHTMLTaglinesParser(DOMParserBase):
"""Parser for the "taglines" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
tparser = DOMHTMLTaglinesParser()
result = tparser.parse(taglines_html_string)
"""
rules = [
Rule(
key='taglines',
extractor=Path(
foreach='//div[@id="taglines_content"]/div',
path='.//text()'
)
)
]
def preprocess_dom(self, dom):
preprocessors.remove(dom, '//div[@id="taglines_content"]/div[@class="header"]')
preprocessors.remove(dom, '//div[@id="taglines_content"]/div[@id="no_content"]')
return dom
def postprocess_data(self, data):
if 'taglines' in data:
data['taglines'] = [tagline.strip() for tagline in data['taglines']]
return data
class DOMHTMLKeywordsParser(DOMParserBase):
"""Parser for the "keywords" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
kwparser = DOMHTMLKeywordsParser()
result = kwparser.parse(keywords_html_string)
"""
rules = [
Rule(
key='keywords',
extractor=Path(
foreach='//a[starts-with(@href, "/keyword/")]',
path='./text()',
transform=lambda x: x.lower().replace(' ', '-')
)
)
]
class DOMHTMLAlternateVersionsParser(DOMParserBase):
"""Parser for the "alternate versions" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
avparser = DOMHTMLAlternateVersionsParser()
result = avparser.parse(alternateversions_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='alternate versions',
extractor=Path(
foreach='//ul[@class="trivia"]/li',
path='.//text()',
transform=str.strip
)
)
]
class DOMHTMLTriviaParser(DOMParserBase):
"""Parser for the "trivia" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
tparser = DOMHTMLTriviaParser()
result = tparser.parse(trivia_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='trivia',
extractor=Path(
foreach='//div[@class="sodatext"]',
path='.//text()',
transform=str.strip
)
)
]
def preprocess_dom(self, dom):
# Remove "link this quote" links.
preprocessors.remove(dom, '//span[@class="linksoda"]')
return dom
class DOMHTMLSoundtrackParser(DOMParserBase):
_defGetRefs = True
preprocessors = [('<br />', '\n'), ('<br>', '\n')]
rules = [
Rule(
key='soundtrack',
extractor=Path(
foreach='//div[@class="list"]//div',
path='.//text()',
transform=str.strip
)
)
]
def postprocess_data(self, data):
if 'soundtrack' in data:
nd = []
for x in data['soundtrack']:
ds = x.split('\n')
title = ds[0]
if title[0] == '"' and title[-1] == '"':
title = title[1:-1]
nds = []
newData = {}
for l in ds[1:]:
if ' with ' in l or ' by ' in l or ' from ' in l \
or ' of ' in l or l.startswith('From '):
nds.append(l)
else:
if nds:
nds[-1] += l
else:
nds.append(l)
newData[title] = {}
for l in nds:
skip = False
for sep in ('From ',):
if l.startswith(sep):
fdix = len(sep)
kind = l[:fdix].rstrip().lower()
info = l[fdix:].lstrip()
newData[title][kind] = info
skip = True
if not skip:
for sep in ' with ', ' by ', ' from ', ' of ':
fdix = l.find(sep)
if fdix != -1:
fdix = fdix + len(sep)
kind = l[:fdix].rstrip().lower()
info = l[fdix:].lstrip()
newData[title][kind] = info
break
nd.append(newData)
data['soundtrack'] = nd
return data
class DOMHTMLCrazyCreditsParser(DOMParserBase):
"""Parser for the "crazy credits" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
ccparser = DOMHTMLCrazyCreditsParser()
result = ccparser.parse(crazycredits_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='crazy credits',
extractor=Path(
foreach='//ul/li/tt',
path='.//text()',
transform=lambda x: x.replace('\n', ' ').replace('  ', ' ')  # collapse doubled spaces (assumed; the space-for-space replace was a no-op)
)
)
]
def _process_goof(x):
# use .get() so an absent spoiler category doesn't raise KeyError
if x.get('spoiler_category'):
return x['spoiler_category'].strip() + ': SPOILER: ' + x['text'].strip()
else:
return x['category'].strip() + ': ' + x['text'].strip()
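# Example (hypothetical extracted values):
#   _process_goof({'category': 'Continuity', 'spoiler_category': None,
#                  'text': 'The glass moves.'})
# returns 'Continuity: The glass moves.'; entries with a spoiler category
# get a 'SPOILER: ' marker instead.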
class DOMHTMLGoofsParser(DOMParserBase):
"""Parser for the "goofs" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
gparser = DOMHTMLGoofsParser()
result = gparser.parse(goofs_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='goofs',
extractor=Rules(
foreach='//div[@class="soda odd"]',
rules=[
Rule(
key='text',
extractor=Path('./text()')
),
Rule(
key='category',
extractor=Path('./preceding-sibling::h4[1]/text()')
),
Rule(
key='spoiler_category',
extractor=Path('./h4/text()')
)
],
transform=_process_goof
)
)
]
class DOMHTMLQuotesParser(DOMParserBase):
"""Parser for the "memorable quotes" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
qparser = DOMHTMLQuotesParser()
result = qparser.parse(quotes_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='quotes_odd',
extractor=Path(
foreach='//div[@class="quote soda odd"]',
path='.//text()',
transform=lambda x: x
.strip()
.replace(' \n', '::')
.replace('::\n', '::')
.replace('\n', ' ')
)
),
Rule(
key='quotes_even',
extractor=Path(
foreach='//div[@class="quote soda even"]',
path='.//text()',
transform=lambda x: x
.strip()
.replace(' \n', '::')
.replace('::\n', '::')
.replace('\n', ' ')
)
)
]
preprocessors = [
(re.compile('<a href="#" class="hidesoda hidden">Hide options</a><br>', re.I), '')
]
def preprocess_dom(self, dom):
# Remove "link this quote" links.
preprocessors.remove(dom, '//span[@class="linksoda"]')
preprocessors.remove(dom, '//div[@class="sharesoda_pre"]')
return dom
def postprocess_data(self, data):
quotes = data.get('quotes_odd', []) + data.get('quotes_even', [])
if not quotes:
return {}
quotes = [q.split('::') for q in quotes]
return {'quotes': quotes}
class DOMHTMLReleaseinfoParser(DOMParserBase):
"""Parser for the "release dates" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
rdparser = DOMHTMLReleaseinfoParser()
result = rdparser.parse(releaseinfo_html_string)
"""
rules = [
Rule(
key='release dates',
extractor=Rules(
foreach='//table[@id="release_dates"]//tr',
rules=[
Rule(
key='country',
extractor=Path('.//td[1]//text()')
),
Rule(
key='date',
extractor=Path('.//td[2]//text()')
),
Rule(
key='notes',
extractor=Path('.//td[3]//text()')
)
]
)
),
Rule(
key='akas',
extractor=Rules(
foreach='//table[@id="akas"]//tr',
rules=[
Rule(
key='title',
extractor=Path('./td[1]/text()')
),
Rule(
key='countries',
extractor=Path('./td[2]/text()')
)
]
)
)
]
preprocessors = [
(re.compile('(<h5><a name="?akas"?.*</table>)', re.I | re.M | re.S),
r'<div class="_imdbpy_akas">\1</div>')
]
def postprocess_data(self, data):
if not ('release dates' in data or 'akas' in data):
return data
releases = data.get('release dates') or []
rl = []
for i in releases:
country = i.get('country')
date = i.get('date')
if not (country and date):
continue
country = country.strip()
date = date.strip()
if not (country and date):
continue
notes = i['notes']
info = '%s::%s' % (country, date)
if notes:
info += notes
rl.append(info)
if releases:
del data['release dates']
if rl:
data['release dates'] = rl
akas = data.get('akas') or []
nakas = []
for aka in akas:
title = (aka.get('title') or '').strip()
if not title:
continue
countries = (aka.get('countries') or '').split(',')
if not countries:
nakas.append(title)
else:
for country in countries:
nakas.append('%s::%s' % (title, country.strip()))
if akas:
del data['akas']
if nakas:
data['akas from release info'] = nakas
return data
class DOMHTMLRatingsParser(DOMParserBase):
"""Parser for the "user ratings" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
rparser = DOMHTMLRatingsParser()
result = rparser.parse(userratings_html_string)
"""
    re_means = re.compile(r'mean\s*=\s*([0-9]\.[0-9])\s*median\s*=\s*([0-9])', re.I)
rules = [
Rule(
key='votes',
extractor=Rules(
foreach='//th[@class="firstTableCoulmn"]/../../tr',
rules=[
Rule(
key='ordinal',
extractor=Path('./td[1]/div//text()')
),
Rule(
key='votes',
extractor=Path('./td[3]/div/div//text()')
)
]
)
),
Rule(
key='mean and median',
extractor=Path(
'//div[starts-with(normalize-space(text()), "Arithmetic mean")]/text()'
)
),
Rule(
key='demographics',
extractor=Rules(
foreach='//div[@class="smallcell"]',
rules=[
Rule(
key='link',
extractor=Path('./a/@href')
),
Rule(
key='rating',
extractor=Path('..//div[@class="bigcell"]//text()')
),
Rule(
key='votes',
extractor=Path('./a/text()')
)
]
)
)
]
def postprocess_data(self, data):
nd = {}
demographics = data.get('demographics')
if demographics:
dem = {}
for dem_data in demographics:
link = (dem_data.get('link') or '').strip()
votes = (dem_data.get('votes') or '').strip()
rating = (dem_data.get('rating') or '').strip()
if not (link and votes and rating):
continue
eq_idx = link.rfind('=')
if eq_idx == -1:
continue
info = link[eq_idx + 1:].replace('_', ' ')
try:
votes = int(votes.replace(',', ''))
except Exception:
continue
try:
rating = float(rating)
except Exception:
continue
dem[info] = {'votes': votes, 'rating': rating}
nd['demographics'] = dem
votes = data.get('votes', [])
if votes:
nd['number of votes'] = {}
for v_info in votes:
ordinal = v_info.get('ordinal')
nr_votes = v_info.get('votes')
if not (ordinal and nr_votes):
continue
try:
ordinal = int(ordinal)
except Exception:
continue
try:
nr_votes = int(nr_votes.replace(',', ''))
except Exception:
continue
nd['number of votes'][ordinal] = nr_votes
mean = data.get('mean and median', '')
if mean:
means = self.re_means.findall(mean)
if means and len(means[0]) == 2:
am, med = means[0]
try:
am = float(am)
except (ValueError, OverflowError):
pass
if isinstance(am, float):
nd['arithmetic mean'] = am
try:
med = int(med)
except (ValueError, OverflowError):
pass
if isinstance(med, int):
nd['median'] = med
return nd
def _normalize_href(href):
if (href is not None) and (not href.lower().startswith('http://')):
if href.startswith('/'):
href = href[1:]
# TODO: imdbURL_base may be set by the user!
href = '%s%s' % (imdbURL_base, href)
return href
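# For example (illustrative, assuming imdbURL_base is 'http://www.imdb.com/'):
# _normalize_href('/title/tt0094226/') -> 'http://www.imdb.com/title/tt0094226/'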
class DOMHTMLCriticReviewsParser(DOMParserBase):
"""Parser for the "critic reviews" pages of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        crparser = DOMHTMLCriticReviewsParser()
        result = crparser.parse(criticreviews_html_string)
"""
kind = 'critic reviews'
rules = [
Rule(
key='metascore',
extractor=Path('//div[@class="metascore_wrap"]/div/span//text()')
),
Rule(
key='metacritic url',
extractor=Path('//div[@class="article"]/div[@class="see-more"]/a/@href')
)
]
class DOMHTMLReviewsParser(DOMParserBase):
"""Parser for the "reviews" pages of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        revparser = DOMHTMLReviewsParser()
        result = revparser.parse(reviews_html_string)
"""
rules = [
Rule(
key='reviews',
extractor=Rules(
foreach='//div[@class="review-container"]',
rules=[
Rule(
key='text',
extractor=Path('.//div[@class="text"]//text()')
),
Rule(
key='helpful',
extractor=Path('.//div[@class="text-muted"]/text()[1]')
),
Rule(
key='title',
extractor=Path('.//div[@class="title"]//text()')
),
Rule(
key='author',
extractor=Path('.//span[@class="display-name-link"]/a/@href')
),
Rule(
key='date',
extractor=Path('.//span[@class="review-date"]//text()')
),
Rule(
key='rating',
extractor=Path('.//span[@class="point-scale"]/preceding-sibling::span[1]/text()')
)
],
transform=lambda x: ({
                    'content': x.get('text', '').replace('\n', ' ').replace('  ', ' ').strip(),
'helpful': [int(s) for s in x.get('helpful', '').split() if s.isdigit()],
'title': x.get('title', '').strip(),
'author': analyze_imdbid(x.get('author')),
'date': x.get('date', '').strip(),
'rating': x.get('rating', '').strip()
})
)
)
]
preprocessors = [('<br>', '<br>\n')]
def postprocess_data(self, data):
for review in data.get('reviews', []):
if review.get('rating') and len(review['rating']) == 2:
review['rating'] = int(review['rating'][0])
else:
review['rating'] = None
if review.get('helpful') and len(review['helpful']) == 2:
review['not_helpful'] = review['helpful'][1] - review['helpful'][0]
review['helpful'] = review['helpful'][0]
else:
review['helpful'] = 0
review['not_helpful'] = 0
review['author'] = "ur%s" % review['author']
return data
class DOMHTMLFullCreditsParser(DOMParserBase):
"""Parser for the "full credits" (series cast section) page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        fcparser = DOMHTMLFullCreditsParser()
        result = fcparser.parse(fullcredits_html_string)
"""
kind = 'full credits'
rules = [
Rule(
key='cast',
extractor=Rules(
foreach='//table[@class="cast_list"]//tr[@class="odd" or @class="even"]',
rules=[
Rule(
key='person',
extractor=Path('.//text()')
),
Rule(
key='link',
extractor=Path('./td[2]/a/@href')
),
Rule(
key='roleID',
extractor=Path('./td[4]//div[@class="_imdbpyrole"]/@roleid')
)
],
transform=lambda x: build_person(
x.get('person') or '',
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID') or '').split('/')
)
)
)
]
preprocessors = [
(_reRolesMovie, _manageRoles)
]
class DOMHTMLOfficialsitesParser(DOMParserBase):
"""Parser for the "official sites", "external reviews"
"miscellaneous links", "sound clips", "video clips" and
"photographs" pages of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
osparser = DOMHTMLOfficialsitesParser()
result = osparser.parse(officialsites_html_string)
"""
rules = [
Rule(
foreach='//h4[@class="li_group"]',
key=Path(
'./text()',
transform=lambda x: x.strip().lower()
),
extractor=Rules(
foreach='./following::ul[1]/li/a',
rules=[
Rule(
key='link',
extractor=Path('./@href')
),
Rule(
key='info',
extractor=Path('./text()')
)
],
transform=lambda x: (
x.get('info').strip(),
urllib.parse.unquote(_normalize_href(x.get('link')))
)
)
)
]
class DOMHTMLConnectionParser(DOMParserBase):
"""Parser for the "connections" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
connparser = DOMHTMLConnectionParser()
result = connparser.parse(connections_html_string)
"""
_containsObjects = True
rules = [
Rule(
key='connection',
extractor=Rules(
foreach='//div[@class="_imdbpy"]',
rules=[
Rule(
key=Path('./h5/text()', transform=str.lower),
extractor=Rules(
foreach='./a',
rules=[
Rule(
key='title',
extractor=Path('./text()')
),
Rule(
key='movieID',
extractor=Path('./@href')
)
]
)
)
]
)
)
]
preprocessors = [
('<h5>', '</div><div class="_imdbpy"><h5>'),
# To get the movie's year.
('</a> (', ' ('),
('\n<br/>', '</a>'),
('<br/> - ', '::')
]
def postprocess_data(self, data):
for key in list(data.keys()):
nl = []
for v in data[key]:
title = v['title']
ts = title.split('::', 1)
title = ts[0].strip()
notes = ''
if len(ts) == 2:
notes = ts[1].strip()
m = Movie(title=title, movieID=analyze_imdbid(v['movieID']),
accessSystem=self._as, notes=notes, modFunct=self._modFunct)
nl.append(m)
data[key] = nl
if not data:
return {}
return {'connections': data}
class DOMHTMLLocationsParser(DOMParserBase):
"""Parser for the "locations" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
lparser = DOMHTMLLocationsParser()
result = lparser.parse(locations_html_string)
"""
rules = [
Rule(
key='locations',
extractor=Rules(
foreach='//dt',
rules=[
Rule(
key='place',
extractor=Path('.//text()')
),
Rule(
key='note',
extractor=Path('./following-sibling::dd[1]//text()')
)
],
transform=lambda x: ('%s::%s' % (x['place'].strip(),
(x['note'] or '').strip())).strip(':')
)
)
]
class DOMHTMLTechParser(DOMParserBase):
"""Parser for the "technical", "publicity" (for people) and "contacts" (for people)
pages of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        tparser = DOMHTMLTechParser()
result = tparser.parse(technical_html_string)
"""
kind = 'tech'
re_space = re.compile(r'\s+')
rules = [
Rule(
key='tech',
extractor=Rules(
foreach='//table//tr/td[@class="label"]',
rules=[
Rule(
key=Path(
'./text()',
transform=lambda x: x.lower().strip()),
extractor=Path(
'..//td[2]//text()',
transform=lambda x: [t.strip()
for t in x.split(':::') if t.strip()]
)
)
]
)
)
]
preprocessors = [
(re.compile('(<h5>.*?</h5>)', re.I), r'</div>\1<div class="_imdbpy">'),
(re.compile('((<br/>|</p>|</table>))\n?<br/>(?!<a)', re.I), r'\1</div>'),
# the ones below are for the publicity parser
(re.compile('<p>(.*?)</p>', re.I), r'\1<br/>'),
(re.compile('(</td><td valign="top">)', re.I), r'\1::'),
(re.compile('(</tr><tr>)', re.I), r'\n\1'),
        (re.compile(r'<span class="ghost">\|</span>', re.I), r':::'),
(re.compile('<br/?>', re.I), r':::')
# this is for splitting individual entries
]
def postprocess_data(self, data):
for key in data:
data[key] = [x for x in data[key] if x != '|']
data[key] = [self.re_space.sub(' ', x).strip() for x in data[key]]
data[key] = [_f for _f in data[key] if _f]
if self.kind == 'contacts' and data:
data = {self.kind: data}
else:
if self.kind == 'publicity':
if 'biography (print)' in data:
data['biography-print'] = data['biography (print)']
del data['biography (print)']
# Tech info.
for key in list(data.keys()):
if key.startswith('film negative format'):
data['film negative format'] = data[key]
del data[key]
elif key.startswith('film length'):
data['film length'] = data[key]
del data[key]
return data
class DOMHTMLNewsParser(DOMParserBase):
"""Parser for the "news" page of a given movie or person.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
nwparser = DOMHTMLNewsParser()
result = nwparser.parse(news_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='news',
extractor=Rules(
foreach='//h2',
rules=[
Rule(
key='title',
extractor=Path('./text()')
),
Rule(
key='fromdate',
extractor=Path('./following-sibling::p[1]/small//text()')
),
Rule(
key='body',
extractor=Path('../following-sibling::p[2]//text()')
),
Rule(
key='link',
extractor=Path('../..//a[text()="Permalink"]/@href')
),
Rule(
key='fulllink',
extractor=Path('../..//a[starts-with(text(), "See full article at")]/@href')
)
],
transform=lambda x: {
'title': x.get('title').strip(),
'date': x.get('fromdate').split('|')[0].strip(),
'from': x.get('fromdate').split('|')[1].replace('From ', '').strip(),
'body': (x.get('body') or '').strip(),
'link': _normalize_href(x.get('link')),
'full article link': _normalize_href(x.get('fulllink'))
}
)
)
]
preprocessors = [
(re.compile('(<a name=[^>]+><h2>)', re.I), r'<div class="_imdbpy">\1'),
(re.compile('(<hr/>)', re.I), r'</div>\1'),
(re.compile('<p></p>', re.I), r'')
]
def postprocess_data(self, data):
if 'news' not in data:
return {}
for news in data['news']:
if 'full article link' in news:
if news['full article link'] is None:
del news['full article link']
return data
def _parse_review(x):
result = {}
title = x.get('title').strip()
    if title.endswith(':'):
title = title[:-1]
result['title'] = title
result['link'] = _normalize_href(x.get('link'))
kind = x.get('kind').strip()
    if kind.endswith(':'):
kind = kind[:-1]
result['review kind'] = kind
text = x.get('review').replace('\n\n', '||').replace('\n', ' ').split('||')
review = '\n'.join(text)
if x.get('author') is not None:
author = x.get('author').strip()
review = review.split(author)[0].strip()
result['review author'] = author[2:]
if x.get('item') is not None:
item = x.get('item').strip()
review = review[len(item):].strip()
review = "%s: %s" % (item, review)
result['review'] = review
return result
class DOMHTMLSeasonEpisodesParser(DOMParserBase):
"""Parser for the "episode list" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
sparser = DOMHTMLSeasonEpisodesParser()
result = sparser.parse(episodes_html_string)
"""
rules = [
Rule(
key='series link',
extractor=Path('//div[@class="parent"]//a/@href')
),
Rule(
key='series title',
extractor=Path('//head/meta[@property="og:title"]/@content')
),
Rule(
key='_seasons',
extractor=Path(
foreach='//select[@id="bySeason"]//option',
                path='./@value'
)
),
Rule(
key='_current_season',
extractor=Path('//select[@id="bySeason"]//option[@selected]/@value')
),
Rule(
key='episodes',
extractor=Rules(
foreach='//div[@class="info"]',
rules=[
Rule(
key=Path('.//meta/@content',
transform=lambda x: 'episode %s' % x),
extractor=Rules(
rules=[
Rule(
key='link',
extractor=Path('.//strong//a[@href][1]/@href')
),
Rule(
key='original air date',
extractor=Path('.//div[@class="airdate"]/text()')
),
Rule(
key='title',
extractor=Path('.//strong//text()')
),
Rule(
key='rating',
extractor=Path(
'.//div[@class="ipl-rating-star "][1]'
'/span[@class="ipl-rating-star__rating"][1]/text()'
)
),
Rule(
key='votes',
extractor=Path(
'.//div[contains(@class, "ipl-rating-star")][1]'
'/span[@class="ipl-rating-star__total-votes"][1]/text()'
)
),
Rule(
key='plot',
extractor=Path('.//div[@class="item_description"]//text()')
)
]
)
)
]
)
)
]
def postprocess_data(self, data):
series_id = analyze_imdbid(data.get('series link'))
series_title = data.get('series title', '').strip()
selected_season = data.get('_current_season', 'unknown season').strip()
if not (series_id and series_title):
return {}
series = Movie(title=series_title, movieID=str(series_id),
accessSystem=self._as, modFunct=self._modFunct)
if series.get('kind') == 'movie':
series['kind'] = 'tv series'
try:
selected_season = int(selected_season)
except ValueError:
pass
nd = {selected_season: {}}
if 'episode -1' in data:
counter = 1
for episode in data['episode -1']:
while 'episode %d' % counter in data:
counter += 1
k = 'episode %d' % counter
data[k] = [episode]
del data['episode -1']
episodes = data.get('episodes', [])
for ep in episodes:
if not ep:
continue
episode_nr, episode = list(ep.items())[0]
if not episode_nr.startswith('episode '):
continue
episode_nr = episode_nr[8:].rstrip()
try:
episode_nr = int(episode_nr)
except ValueError:
pass
            episode_id = analyze_imdbid(episode.get('link', ''))
episode_air_date = episode.get('original air date', '').strip()
episode_title = episode.get('title', '').strip()
episode_plot = episode.get('plot', '')
episode_rating = episode.get('rating', '')
episode_votes = episode.get('votes', '')
if not (episode_nr is not None and episode_id and episode_title):
continue
ep_obj = Movie(movieID=episode_id, title=episode_title,
accessSystem=self._as, modFunct=self._modFunct)
ep_obj['kind'] = 'episode'
ep_obj['episode of'] = series
ep_obj['season'] = selected_season
ep_obj['episode'] = episode_nr
if episode_rating:
try:
ep_obj['rating'] = float(episode_rating)
                except Exception:
pass
if episode_votes:
try:
ep_obj['votes'] = int(episode_votes.replace(',', '')
.replace('.', '').replace('(', '').replace(')', ''))
                except Exception:
pass
if episode_air_date:
ep_obj['original air date'] = episode_air_date
if episode_air_date[-4:].isdigit():
ep_obj['year'] = episode_air_date[-4:]
if episode_plot:
ep_obj['plot'] = episode_plot
nd[selected_season][episode_nr] = ep_obj
_seasons = data.get('_seasons') or []
for idx, season in enumerate(_seasons):
try:
_seasons[idx] = int(season)
except ValueError:
pass
return {'episodes': nd, '_seasons': _seasons, '_current_season': selected_season}
def _build_episode(x):
"""Create a Movie object for a given series' episode."""
episode_id = analyze_imdbid(x.get('link'))
episode_title = x.get('title')
e = Movie(movieID=episode_id, title=episode_title)
e['kind'] = 'episode'
oad = x.get('oad')
if oad:
e['original air date'] = oad.strip()
year = x.get('year')
if year is not None:
year = year[5:]
if year == 'unknown':
year = '????'
if year and year.isdigit():
year = int(year)
e['year'] = year
else:
if oad and oad[-4:].isdigit():
e['year'] = int(oad[-4:])
epinfo = x.get('episode')
if epinfo is not None:
season, episode = epinfo.split(':')[0].split(',')
e['season'] = int(season[7:])
e['episode'] = int(episode[8:])
else:
e['season'] = 'unknown'
e['episode'] = 'unknown'
plot = x.get('plot')
if plot:
e['plot'] = plot.strip()
return e
class DOMHTMLEpisodesParser(DOMParserBase):
"""Parser for the "episode list" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
eparser = DOMHTMLEpisodesParser()
result = eparser.parse(episodes_html_string)
"""
# XXX: no more used for the list of episodes parser,
# but only for the episodes cast parser (see below).
_containsObjects = True
kind = 'episodes list'
_episodes_path = "..//h4"
_oad_path = "./following-sibling::span/strong[1]/text()"
def _init(self):
self.rules = [
Rule(
key='series title',
extractor=Path('//title/text()')
),
Rule(
key='series movieID',
extractor=Path(
'.//h1/a[@class="main"]/@href',
transform=analyze_imdbid
)
),
Rule(
key='episodes',
extractor=Rules(
foreach='//div[@class="_imdbpy"]/h3',
rules=[
Rule(
key='./a/@name',
extractor=Rules(
foreach=self._episodes_path,
rules=[
Rule(
key='link',
extractor=Path('./a/@href')
),
Rule(
key='title',
extractor=Path('./a/text()')
),
Rule(
key='year',
extractor=Path('./preceding-sibling::a[1]/@name')
),
Rule(
key='episode',
extractor=Path('./text()[1]')
),
Rule(
key='oad',
extractor=Path(self._oad_path)
),
Rule(
key='plot',
extractor=Path('./following-sibling::text()[1]')
)
],
transform=_build_episode
)
)
]
)
)
]
if self.kind == 'episodes cast':
self.rules += [
Rule(
key='cast',
extractor=Rules(
foreach='//h4',
rules=[
Rule(
key=Path('./text()[1]', transform=str.strip),
extractor=Rules(
foreach='./following-sibling::table[1]//td[@class="nm"]',
rules=[
Rule(
key='person',
extractor=Path('..//text()')
),
Rule(
key='link',
extractor=Path('./a/@href')
),
Rule(
key='roleID',
extractor=Path('../td[4]//div[@class="_imdbpyrole"]/@roleid')
)
],
transform=lambda x: build_person(
x.get('person') or '',
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID') or '').split('/'),
accessSystem=self._as,
modFunct=self._modFunct
)
)
)
]
)
)
]
preprocessors = [
(re.compile('(<hr/>\n)(<h3>)', re.I), r'</div>\1<div class="_imdbpy">\2'),
(re.compile('(</p>\n\n)</div>', re.I), r'\1'),
(re.compile('<h3>(.*?)</h3>', re.I), r'<h4>\1</h4>'),
(_reRolesMovie, _manageRoles),
(re.compile('(<br/> <br/>\n)(<hr/>)', re.I), r'\1</div>\2')
]
def postprocess_data(self, data):
# A bit extreme?
if 'series title' not in data:
return {}
if 'series movieID' not in data:
return {}
stitle = data['series title'].replace('- Episode list', '')
stitle = stitle.replace('- Episodes list', '')
stitle = stitle.replace('- Episode cast', '')
stitle = stitle.replace('- Episodes cast', '')
stitle = stitle.strip()
if not stitle:
return {}
seriesID = data['series movieID']
if seriesID is None:
return {}
series = Movie(title=stitle, movieID=str(seriesID),
accessSystem=self._as, modFunct=self._modFunct)
nd = {}
for key in list(data.keys()):
if key.startswith('filter-season-') or key.startswith('season-'):
season_key = key.replace('filter-season-', '').replace('season-', '')
try:
season_key = int(season_key)
except ValueError:
pass
nd[season_key] = {}
ep_counter = 1
for episode in data[key]:
if not episode:
continue
episode_key = episode.get('episode')
if episode_key is None:
continue
if not isinstance(episode_key, int):
episode_key = ep_counter
ep_counter += 1
cast_key = 'Season %s, Episode %s:' % (season_key, episode_key)
if cast_key in data:
cast = data[cast_key]
for i in range(len(cast)):
cast[i].billingPos = i + 1
episode['cast'] = cast
episode['episode of'] = series
nd[season_key][episode_key] = episode
if len(nd) == 0:
return {}
return {'episodes': nd}
class DOMHTMLFaqsParser(DOMParserBase):
"""Parser for the "FAQ" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
fparser = DOMHTMLFaqsParser()
result = fparser.parse(faqs_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='faqs',
extractor=Rules(
foreach='//div[@class="section"]',
rules=[
Rule(
key='question',
extractor=Path('./h3/a/span/text()')
),
Rule(
key='answer',
extractor=Path('../following-sibling::div[1]//text()')
)
],
transform=lambda x: '%s::%s' % (
x.get('question').strip(),
'\n\n'.join(x.get('answer').replace('\n\n', '\n').strip().split('||'))
)
)
)
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||'),
(re.compile('<h4>(.*?)</h4>\n', re.I), r'||\1--'),
(re.compile('<span class="spoiler"><span>(.*?)</span></span>', re.I),
r'[spoiler]\1[/spoiler]')
]
class DOMHTMLAiringParser(DOMParserBase):
"""Parser for the "airing" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
aparser = DOMHTMLAiringParser()
result = aparser.parse(airing_html_string)
"""
_containsObjects = True
rules = [
Rule(
key='series title',
extractor=Path(
'//title/text()',
transform=lambda x: x.replace(' - TV schedule', '')
)
),
Rule(
key='series id',
extractor=Path('//h1/a[@href]/@href')
),
Rule(
            key='airing',
extractor=Rules(
foreach='//tr[@class]',
rules=[
Rule(
key='date',
extractor=Path('./td[1]//text()')
),
Rule(
key='time',
extractor=Path('./td[2]//text()')
),
Rule(
key='channel',
extractor=Path('./td[3]//text()')
),
Rule(
key='link',
extractor=Path('./td[4]/a[1]/@href')
),
Rule(
key='title',
extractor=Path('./td[4]//text()')
),
Rule(
key='season',
extractor=Path('./td[5]//text()')
)
],
transform=lambda x: {
'date': x.get('date'),
'time': x.get('time'),
'channel': x.get('channel').strip(),
'link': x.get('link'),
'title': x.get('title'),
'season': (x.get('season') or '').strip()
}
)
)
]
def postprocess_data(self, data):
if len(data) == 0:
return {}
seriesTitle = data.get('series title') or ''
seriesID = analyze_imdbid(data.get('series id'))
if seriesID and 'airing' in data:
for airing in data['airing']:
title = airing.get('title', '').strip()
if not title:
epsTitle = seriesTitle
if seriesID is None:
continue
epsID = seriesID
else:
epsTitle = '%s {%s}' % (data['series title'],
airing['title'])
epsID = analyze_imdbid(airing['link'])
e = Movie(title=epsTitle, movieID=epsID)
airing['episode'] = e
del airing['link']
del airing['title']
if not airing['season']:
del airing['season']
if 'series title' in data:
del data['series title']
if 'series id' in data:
del data['series id']
if 'airing' in data:
data['airing'] = [_f for _f in data['airing'] if _f]
if 'airing' not in data or not data['airing']:
return {}
return data
class DOMHTMLParentsGuideParser(DOMParserBase):
"""Parser for the "parents guide" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
        pgparser = DOMHTMLParentsGuideParser()
result = pgparser.parse(parentsguide_html_string)
"""
rules = [
Rule(
key='parents guide',
extractor=Rules(
foreach='//div[@class="section"]',
rules=[
Rule(
key=Path(
'./h3/a/span/text()',
transform=str.lower
),
extractor=Path(
foreach='../following-sibling::div[1]/p',
path='.//text()',
transform=lambda x: [
t.strip().replace('\n', ' ')
for t in x.split('||') if t.strip()
]
)
)
]
)
)
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||')
]
def postprocess_data(self, data):
data2 = {}
for key in data:
if data[key]:
data2[key] = data[key]
if not data2:
return {}
return {'parents guide': data2}
_OBJECTS = {
'movie_parser': ((DOMHTMLMovieParser,), None),
'full_credits_parser': ((DOMHTMLFullCreditsParser,), None),
'plot_parser': ((DOMHTMLPlotParser,), None),
'movie_awards_parser': ((DOMHTMLAwardsParser,), None),
'taglines_parser': ((DOMHTMLTaglinesParser,), None),
'keywords_parser': ((DOMHTMLKeywordsParser,), None),
'crazycredits_parser': ((DOMHTMLCrazyCreditsParser,), None),
'goofs_parser': ((DOMHTMLGoofsParser,), None),
'alternateversions_parser': ((DOMHTMLAlternateVersionsParser,), None),
'trivia_parser': ((DOMHTMLTriviaParser,), None),
'soundtrack_parser': ((DOMHTMLSoundtrackParser,), None),
'quotes_parser': ((DOMHTMLQuotesParser,), None),
'releasedates_parser': ((DOMHTMLReleaseinfoParser,), None),
'ratings_parser': ((DOMHTMLRatingsParser,), None),
'criticrev_parser': ((DOMHTMLCriticReviewsParser,), {'kind': 'critic reviews'}),
'reviews_parser': ((DOMHTMLReviewsParser,), {'kind': 'reviews'}),
'externalsites_parser': ((DOMHTMLOfficialsitesParser,), None),
'officialsites_parser': ((DOMHTMLOfficialsitesParser,), None),
'externalrev_parser': ((DOMHTMLOfficialsitesParser,), None),
'misclinks_parser': ((DOMHTMLOfficialsitesParser,), None),
'soundclips_parser': ((DOMHTMLOfficialsitesParser,), None),
'videoclips_parser': ((DOMHTMLOfficialsitesParser,), None),
'photosites_parser': ((DOMHTMLOfficialsitesParser,), None),
'connections_parser': ((DOMHTMLConnectionParser,), None),
'tech_parser': ((DOMHTMLTechParser,), None),
'locations_parser': ((DOMHTMLLocationsParser,), None),
'news_parser': ((DOMHTMLNewsParser,), None),
'episodes_parser': ((DOMHTMLEpisodesParser,), None),
'season_episodes_parser': ((DOMHTMLSeasonEpisodesParser,), None),
'movie_faqs_parser': ((DOMHTMLFaqsParser,), None),
'airing_parser': ((DOMHTMLAiringParser,), None),
'parentsguide_parser': ((DOMHTMLParentsGuideParser,), None)
}
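# Illustrative sketch (an assumption, not code from this module): each _OBJECTS
# entry pairs a tuple of parser classes with an optional dict of extra
# attributes, so a caller would presumably instantiate the first class and
# then apply the attributes:
#
#     classes, attrs = _OBJECTS['criticrev_parser']
#     parser = classes[0]()
#     for name, value in (attrs or {}).items():
#         setattr(parser, name, value)   # e.g. kind = 'critic reviews'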
logituit/Recbot | PY/imdb/parser/http/movieParser.py | Python | mit | 90920 | ["Brian"] | f47894ded09bbcbc37e1736fc63f935d452525c294b240399d9b575f09103e1b
#
# libtcod 1.6.3 Python wrapper
# Copyright (c) 2008,2009,2010,2012,2013,2016,2017 Jice & Mingos & rmtew
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Jice or Mingos may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY JICE, MINGOS AND RMTEW ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JICE, MINGOS OR RMTEW BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
import os
import sys
import ctypes
import struct
from ctypes import *
# libtcod does not have a fully unicode API, so all unicode strings have to be
# implicitly converted to ascii, and any unicode-specific operations have to be
# performed explicitly by users:
#     v = v.encode('latin-1')
# Byte strings returned from the API should be converted to unicode so that,
# when formatted via %, they do not appear as "b'sds'":
#     v = v.decode("utf-8")
is_python_3 = sys.version_info > (3, 0)
if is_python_3:
def convert_to_ascii(v):
if type(v) is str:
return v.encode('ascii')
return v
else:
def convert_to_ascii(v):
if type(v) is unicode:
return v.encode('ascii')
return v
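# Illustrative sketch (not part of the original wrapper): on Python 3, str
# values are encoded to ASCII bytes before being handed to the C library,
# while byte strings pass through unchanged:
#
#     convert_to_ascii('title')   # -> b'title'
#     convert_to_ascii(b'title')  # -> b'title' (unchanged)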
if not hasattr(ctypes, "c_bool"): # for Python < 2.6
c_bool = c_uint8
c_void = None
try: #import NumPy if available
import numpy
numpy_available = True
except ImportError:
numpy_available = False
LINUX=False
MAC=False
HAIKU=False
MINGW=False
MSVC=False
def _get_cdll(libname):
'''
get the library libname using a manual search path that will first
check the package directory and then the development path
returns the ctypes lib object
'''
def get_pe_architecture(filePath):
# From: https://github.com/tgandor/meats/blob/master/missing/arch_of.py
with open(filePath, 'rb') as f:
doshdr = f.read(64)
magic, padding, offset = struct.unpack('2s58si', doshdr)
# print (magic, offset)
if magic != b'MZ':
return None
f.seek(offset, os.SEEK_SET)
pehdr = f.read(6)
# careful! H == unsigned short, x64 is negative with signed
magic, padding, machine = struct.unpack('2s2sH', pehdr)
# print (magic, hex(machine))
if magic != b'PE':
return None
if machine == 0x014c:
return 'i386'
if machine == 0x0200:
return 'IA64'
if machine == 0x8664:
return 'x64'
return 'unknown'
pythonExePath = sys.executable
pythonExeArchitecture = get_pe_architecture(pythonExePath)
pathsToTry = []
# 1. Try the directory this script is located in.
pathsToTry.append(os.path.join(__path__[0], libname))
# 2. Try the directory of the command-line script.
scriptFilePath = sys.argv[0]
scriptPath = os.path.dirname(scriptFilePath)
if len(scriptPath):
pathsToTry.append(os.path.join(scriptPath, libname))
else:
pathsToTry.append(os.path.join(os.getcwd(), libname))
# 3. Try the environment variable LIBTCOD_DLL_PATH.
if "LIBTCOD_DLL_PATH" in os.environ:
envPaths = os.environ["LIBTCOD_DLL_PATH"].split(";")
for envPath in envPaths:
if os.path.exists(envPath):
pathsToTry.append(os.path.join(envPath, libname))
# 4. Try the top-level path in the development tree.
potentialTopLevelPath = os.path.realpath(os.path.join(__path__[0], os.pardir, os.pardir))
pythonPath = os.path.join(potentialTopLevelPath, "python")
if os.path.exists(pythonPath):
pathsToTry.append(os.path.join(potentialTopLevelPath, libname))
for libPath in pathsToTry:
if os.path.exists(libPath):
# get library from the package
libArchitecture = get_pe_architecture(libPath)
if libArchitecture != pythonExeArchitecture:
libName = os.path.basename(libPath)
print ("Error: Incompatible architecture, python is %s, %s is %s" % (pythonExeArchitecture, libName, libArchitecture))
sys.exit(1)
return ctypes.cdll[libPath]
raise Exception("unable to locate: "+ libname)
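# Illustrative sketch (an assumption, not part of the original file): given
# the search order above, a custom library location can be supplied through
# the LIBTCOD_DLL_PATH environment variable before this module is imported:
#
#     import os
#     os.environ['LIBTCOD_DLL_PATH'] = '/opt/libtcod'   # hypothetical path
#     import libtcodpy   # the module name here is an assumption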
if sys.platform.find('linux') != -1:
_lib = _get_cdll('libtcod.so')
LINUX=True
elif sys.platform.find('darwin') != -1:
_lib = _get_cdll('libtcod.dylib')
MAC = True
elif sys.platform.find('haiku') != -1:
_lib = _get_cdll('libtcod.so')
HAIKU = True
else:
_get_cdll('SDL2.dll')
_lib = _get_cdll('libtcod.dll')
MSVC=True
# On Windows, ctypes doesn't work well with functions returning structs,
# so we have to use the _wrapper functions instead.
for function_name in [
"TCOD_color_equals",
"TCOD_color_add",
"TCOD_color_subtract",
"TCOD_color_multiply",
"TCOD_color_multiply_scalar",
"TCOD_color_lerp",
"TCOD_color_get_HSV",
"TCOD_color_get_hue",
"TCOD_color_get_saturation",
"TCOD_color_get_value",
"TCOD_console_get_default_background",
"TCOD_console_get_default_foreground",
"TCOD_console_set_default_background",
"TCOD_console_set_default_foreground",
"TCOD_console_get_char_foreground",
"TCOD_console_get_char_background",
"TCOD_console_set_char_background",
"TCOD_console_set_char_foreground",
"TCOD_console_put_char_ex",
"TCOD_console_set_fade",
"TCOD_console_get_fading_color",
"TCOD_console_set_color_control",
"TCOD_image_clear",
"TCOD_image_get_pixel",
"TCOD_image_get_mipmap_pixel",
"TCOD_image_put_pixel",
"TCOD_image_set_key_color",
"TCOD_parser_get_color_property",
"TCOD_console_set_key_color",
]:
wrapper_func = getattr(_lib, function_name +"_wrapper", None)
if wrapper_func is not None:
setattr(_lib, function_name, wrapper_func)
else:
raise Exception("unable to find wrapper", function_name)
HEXVERSION = 0x010603
STRVERSION = "1.6.3"
TECHVERSION = 0x01060300
############################
# color module
############################
class Color(Structure):
_fields_ = [('r', c_uint8),
('g', c_uint8),
('b', c_uint8),
]
def __eq__(self, c):
return _lib.TCOD_color_equals(self, c)
def __mul__(self, c):
if isinstance(c,Color):
return _lib.TCOD_color_multiply(self, c)
else:
return _lib.TCOD_color_multiply_scalar(self, c_float(c))
def __add__(self, c):
return _lib.TCOD_color_add(self, c)
def __sub__(self, c):
return _lib.TCOD_color_subtract(self, c)
def __repr__(self):
return "Color(%d,%d,%d)" % (self.r, self.g, self.b)
def __getitem__(self, i):
if type(i) == str:
return getattr(self, i)
else:
return getattr(self, "rgb"[i])
def __setitem__(self, i, c):
if type(i) == str:
setattr(self, i, c)
else:
setattr(self, "rgb"[i], c)
def __iter__(self):
yield self.r
yield self.g
yield self.b
_lib.TCOD_color_equals.restype=c_bool
_lib.TCOD_color_equals.argtypes=[Color, Color]
_lib.TCOD_color_add.restype=Color
_lib.TCOD_color_add.argtypes=[Color, Color]
_lib.TCOD_color_subtract.restype=Color
_lib.TCOD_color_subtract.argtypes=[Color, Color]
_lib.TCOD_color_multiply.restype=Color
_lib.TCOD_color_multiply.argtypes=[Color , Color ]
_lib.TCOD_color_multiply_scalar.restype=Color
_lib.TCOD_color_multiply_scalar.argtypes=[Color , c_float ]
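# Illustrative sketch (not part of the original wrapper): once the named
# colors below are defined, Color instances support arithmetic through the
# C API wrappers declared above:
#
#     red * 0.5              # -> roughly Color(127,0,0), via multiply_scalar
#     red + blue             # -> Color(255,0,255), component-wise, clamped
#     red == Color(255,0,0)  # -> True, via TCOD_color_equals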
# Should be valid on any platform, check it! Has to be done after Color is defined.
# NOTE(rmtew): This should ideally be deleted. Most of it is moved or duplicated here.
if MAC:
from .cprotos import setup_protos
setup_protos(_lib)
# default colors
# grey levels
black=Color(0,0,0)
darkest_grey=Color(31,31,31)
darker_grey=Color(63,63,63)
dark_grey=Color(95,95,95)
grey=Color(127,127,127)
light_grey=Color(159,159,159)
lighter_grey=Color(191,191,191)
lightest_grey=Color(223,223,223)
darkest_gray=Color(31,31,31)
darker_gray=Color(63,63,63)
dark_gray=Color(95,95,95)
gray=Color(127,127,127)
light_gray=Color(159,159,159)
lighter_gray=Color(191,191,191)
lightest_gray=Color(223,223,223)
white=Color(255,255,255)
# sepia
darkest_sepia=Color(31,24,15)
darker_sepia=Color(63,50,31)
dark_sepia=Color(94,75,47)
sepia=Color(127,101,63)
light_sepia=Color(158,134,100)
lighter_sepia=Color(191,171,143)
lightest_sepia=Color(222,211,195)
#standard colors
red=Color(255,0,0)
flame=Color(255,63,0)
orange=Color(255,127,0)
amber=Color(255,191,0)
yellow=Color(255,255,0)
lime=Color(191,255,0)
chartreuse=Color(127,255,0)
green=Color(0,255,0)
sea=Color(0,255,127)
turquoise=Color(0,255,191)
cyan=Color(0,255,255)
sky=Color(0,191,255)
azure=Color(0,127,255)
blue=Color(0,0,255)
han=Color(63,0,255)
violet=Color(127,0,255)
purple=Color(191,0,255)
fuchsia=Color(255,0,255)
magenta=Color(255,0,191)
pink=Color(255,0,127)
crimson=Color(255,0,63)
# dark colors
dark_red=Color(191,0,0)
dark_flame=Color(191,47,0)
dark_orange=Color(191,95,0)
dark_amber=Color(191,143,0)
dark_yellow=Color(191,191,0)
dark_lime=Color(143,191,0)
dark_chartreuse=Color(95,191,0)
dark_green=Color(0,191,0)
dark_sea=Color(0,191,95)
dark_turquoise=Color(0,191,143)
dark_cyan=Color(0,191,191)
dark_sky=Color(0,143,191)
dark_azure=Color(0,95,191)
dark_blue=Color(0,0,191)
dark_han=Color(47,0,191)
dark_violet=Color(95,0,191)
dark_purple=Color(143,0,191)
dark_fuchsia=Color(191,0,191)
dark_magenta=Color(191,0,143)
dark_pink=Color(191,0,95)
dark_crimson=Color(191,0,47)
# darker colors
darker_red=Color(127,0,0)
darker_flame=Color(127,31,0)
darker_orange=Color(127,63,0)
darker_amber=Color(127,95,0)
darker_yellow=Color(127,127,0)
darker_lime=Color(95,127,0)
darker_chartreuse=Color(63,127,0)
darker_green=Color(0,127,0)
darker_sea=Color(0,127,63)
darker_turquoise=Color(0,127,95)
darker_cyan=Color(0,127,127)
darker_sky=Color(0,95,127)
darker_azure=Color(0,63,127)
darker_blue=Color(0,0,127)
darker_han=Color(31,0,127)
darker_violet=Color(63,0,127)
darker_purple=Color(95,0,127)
darker_fuchsia=Color(127,0,127)
darker_magenta=Color(127,0,95)
darker_pink=Color(127,0,63)
darker_crimson=Color(127,0,31)
# darkest colors
darkest_red=Color(63,0,0)
darkest_flame=Color(63,15,0)
darkest_orange=Color(63,31,0)
darkest_amber=Color(63,47,0)
darkest_yellow=Color(63,63,0)
darkest_lime=Color(47,63,0)
darkest_chartreuse=Color(31,63,0)
darkest_green=Color(0,63,0)
darkest_sea=Color(0,63,31)
darkest_turquoise=Color(0,63,47)
darkest_cyan=Color(0,63,63)
darkest_sky=Color(0,47,63)
darkest_azure=Color(0,31,63)
darkest_blue=Color(0,0,63)
darkest_han=Color(15,0,63)
darkest_violet=Color(31,0,63)
darkest_purple=Color(47,0,63)
darkest_fuchsia=Color(63,0,63)
darkest_magenta=Color(63,0,47)
darkest_pink=Color(63,0,31)
darkest_crimson=Color(63,0,15)
# light colors
light_red=Color(255,114,114)
light_flame=Color(255,149,114)
light_orange=Color(255,184,114)
light_amber=Color(255,219,114)
light_yellow=Color(255,255,114)
light_lime=Color(219,255,114)
light_chartreuse=Color(184,255,114)
light_green=Color(114,255,114)
light_sea=Color(114,255,184)
light_turquoise=Color(114,255,219)
light_cyan=Color(114,255,255)
light_sky=Color(114,219,255)
light_azure=Color(114,184,255)
light_blue=Color(114,114,255)
light_han=Color(149,114,255)
light_violet=Color(184,114,255)
light_purple=Color(219,114,255)
light_fuchsia=Color(255,114,255)
light_magenta=Color(255,114,219)
light_pink=Color(255,114,184)
light_crimson=Color(255,114,149)
#lighter colors
lighter_red=Color(255,165,165)
lighter_flame=Color(255,188,165)
lighter_orange=Color(255,210,165)
lighter_amber=Color(255,232,165)
lighter_yellow=Color(255,255,165)
lighter_lime=Color(232,255,165)
lighter_chartreuse=Color(210,255,165)
lighter_green=Color(165,255,165)
lighter_sea=Color(165,255,210)
lighter_turquoise=Color(165,255,232)
lighter_cyan=Color(165,255,255)
lighter_sky=Color(165,232,255)
lighter_azure=Color(165,210,255)
lighter_blue=Color(165,165,255)
lighter_han=Color(188,165,255)
lighter_violet=Color(210,165,255)
lighter_purple=Color(232,165,255)
lighter_fuchsia=Color(255,165,255)
lighter_magenta=Color(255,165,232)
lighter_pink=Color(255,165,210)
lighter_crimson=Color(255,165,188)
# lightest colors
lightest_red=Color(255,191,191)
lightest_flame=Color(255,207,191)
lightest_orange=Color(255,223,191)
lightest_amber=Color(255,239,191)
lightest_yellow=Color(255,255,191)
lightest_lime=Color(239,255,191)
lightest_chartreuse=Color(223,255,191)
lightest_green=Color(191,255,191)
lightest_sea=Color(191,255,223)
lightest_turquoise=Color(191,255,239)
lightest_cyan=Color(191,255,255)
lightest_sky=Color(191,239,255)
lightest_azure=Color(191,223,255)
lightest_blue=Color(191,191,255)
lightest_han=Color(207,191,255)
lightest_violet=Color(223,191,255)
lightest_purple=Color(239,191,255)
lightest_fuchsia=Color(255,191,255)
lightest_magenta=Color(255,191,239)
lightest_pink=Color(255,191,223)
lightest_crimson=Color(255,191,207)
# desaturated colors
desaturated_red=Color(127,63,63)
desaturated_flame=Color(127,79,63)
desaturated_orange=Color(127,95,63)
desaturated_amber=Color(127,111,63)
desaturated_yellow=Color(127,127,63)
desaturated_lime=Color(111,127,63)
desaturated_chartreuse=Color(95,127,63)
desaturated_green=Color(63,127,63)
desaturated_sea=Color(63,127,95)
desaturated_turquoise=Color(63,127,111)
desaturated_cyan=Color(63,127,127)
desaturated_sky=Color(63,111,127)
desaturated_azure=Color(63,95,127)
desaturated_blue=Color(63,63,127)
desaturated_han=Color(79,63,127)
desaturated_violet=Color(95,63,127)
desaturated_purple=Color(111,63,127)
desaturated_fuchsia=Color(127,63,127)
desaturated_magenta=Color(127,63,111)
desaturated_pink=Color(127,63,95)
desaturated_crimson=Color(127,63,79)
# metallic
brass=Color(191,151,96)
copper=Color(197,136,124)
gold=Color(229,191,0)
silver=Color(203,203,203)
# miscellaneous
celadon=Color(172,255,175)
peach=Color(255,159,127)
# color functions
_lib.TCOD_color_lerp.restype = Color
def color_lerp(c1, c2, a):
return _lib.TCOD_color_lerp(c1, c2, c_float(a))
_lib.TCOD_color_set_HSV.restype=c_void
_lib.TCOD_color_set_HSV.argtypes=[POINTER(Color),c_float , c_float , c_float ]
def color_set_hsv(c, h, s, v):
_lib.TCOD_color_set_HSV(byref(c), c_float(h), c_float(s), c_float(v))
_lib.TCOD_color_get_HSV.restype=c_void
_lib.TCOD_color_get_HSV.argtypes=[Color ,POINTER(c_float) , POINTER(c_float) , POINTER(c_float) ]
def color_get_hsv(c):
h = c_float()
s = c_float()
v = c_float()
_lib.TCOD_color_get_HSV(c, byref(h), byref(s), byref(v))
return h.value, s.value, v.value
_lib.TCOD_color_scale_HSV.restype=c_void
_lib.TCOD_color_scale_HSV.argtypes=[POINTER(Color), c_float , c_float ]
def color_scale_HSV(c, scoef, vcoef):
_lib.TCOD_color_scale_HSV(byref(c),c_float(scoef),c_float(vcoef))
_lib.TCOD_color_gen_map.restype=c_void
_lib.TCOD_color_gen_map.argtypes=[POINTER(Color), c_int, POINTER(Color), POINTER(c_int)]
def color_gen_map(colors, indexes):
ccolors = (Color * len(colors))(*colors)
cindexes = (c_int * len(indexes))(*indexes)
cres = (Color * (max(indexes) + 1))()
_lib.TCOD_color_gen_map(cres, len(colors), ccolors, cindexes)
return cres
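# Illustrative sketch (not part of the original wrapper): color_gen_map
# interpolates between key colors placed at the given indexes, e.g. a short
# black-to-red-to-yellow gradient:
#
#     gradient = color_gen_map([black, red, yellow], [0, 4, 7])
#     # gradient is a ctypes array of 8 Color values; gradient[0] == black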
############################
# console module
############################
class Key(Structure):
_fields_=[('vk', c_int),
('c', c_uint8),
('text',c_char * 32),
('pressed', c_bool),
('lalt', c_bool),
('lctrl', c_bool),
('lmeta', c_bool),
('ralt', c_bool),
('rctrl', c_bool),
('rmeta', c_bool),
('shift', c_bool)
]
class ConsoleBuffer:
# simple console that allows direct (fast) access to cells. simplifies
# use of the "fill" functions.
def __init__(self, width, height, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
# initialize with given width and height. values to fill the buffer
# are optional, defaults to black with no characters.
n = width * height
self.width = width
self.height = height
self.clear(back_r, back_g, back_b, fore_r, fore_g, fore_b, char)
def clear(self, back_r=0, back_g=0, back_b=0, fore_r=0, fore_g=0, fore_b=0, char=' '):
# clears the console. values to fill it with are optional, defaults
# to black with no characters.
n = self.width * self.height
self.back_r = [back_r] * n
self.back_g = [back_g] * n
self.back_b = [back_b] * n
self.fore_r = [fore_r] * n
self.fore_g = [fore_g] * n
self.fore_b = [fore_b] * n
self.char = [ord(char)] * n
def copy(self):
# returns a copy of this ConsoleBuffer.
other = ConsoleBuffer(0, 0)
other.width = self.width
other.height = self.height
other.back_r = list(self.back_r) # make explicit copies of all lists
other.back_g = list(self.back_g)
other.back_b = list(self.back_b)
other.fore_r = list(self.fore_r)
other.fore_g = list(self.fore_g)
other.fore_b = list(self.fore_b)
other.char = list(self.char)
return other
def set_fore(self, x, y, r, g, b, char):
# set the character and foreground color of one cell.
i = self.width * y + x
self.fore_r[i] = int(r)
self.fore_g[i] = int(g)
self.fore_b[i] = int(b)
self.char[i] = ord(char)
def set_back(self, x, y, r, g, b):
# set the background color of one cell.
i = self.width * y + x
self.back_r[i] = int(r)
self.back_g[i] = int(g)
self.back_b[i] = int(b)
def set(self, x, y, back_r, back_g, back_b, fore_r, fore_g, fore_b, char):
# set the background color, foreground color and character of one cell.
i = self.width * y + x
self.back_r[i] = int(back_r)
self.back_g[i] = int(back_g)
self.back_b[i] = int(back_b)
self.fore_r[i] = int(fore_r)
self.fore_g[i] = int(fore_g)
self.fore_b[i] = int(fore_b)
self.char[i] = ord(char)
def blit(self, dest, fill_fore=True, fill_back=True):
# use libtcod's "fill" functions to write the buffer to a console.
if (console_get_width(dest) != self.width or
console_get_height(dest) != self.height):
raise ValueError('ConsoleBuffer.blit: Destination console has an incorrect size.')
s = struct.Struct('%di' % len(self.back_r))
if fill_back:
_lib.TCOD_console_fill_background(c_void_p(dest), (c_int * len(self.back_r))(*self.back_r), (c_int * len(self.back_g))(*self.back_g), (c_int * len(self.back_b))(*self.back_b))
if fill_fore:
_lib.TCOD_console_fill_foreground(c_void_p(dest), (c_int * len(self.fore_r))(*self.fore_r), (c_int * len(self.fore_g))(*self.fore_g), (c_int * len(self.fore_b))(*self.fore_b))
_lib.TCOD_console_fill_char(c_void_p(dest), (c_int * len(self.char))(*self.char))
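# Illustrative sketch (an assumption, not part of the original wrapper):
# after console_init_root(80, 50, ...), a ConsoleBuffer can replace many
# individual console_put_char calls with three bulk "fill" calls:
#
#     buf = ConsoleBuffer(80, 50)
#     for x in range(80):
#         buf.set_back(x, 0, x * 3, 0, 0)   # shade the top row towards red
#     buf.blit(0)   # 0 denotes the root console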
_lib.TCOD_console_is_fullscreen.restype = c_bool
_lib.TCOD_console_is_window_closed.restype = c_bool
_lib.TCOD_console_has_mouse_focus.restype = c_bool
_lib.TCOD_console_is_active.restype = c_bool
_lib.TCOD_console_get_default_background.restype = Color
_lib.TCOD_console_get_default_foreground.restype = Color
_lib.TCOD_console_get_char_background.restype = Color
_lib.TCOD_console_get_char_foreground.restype = Color
_lib.TCOD_console_get_fading_color.restype = Color
_lib.TCOD_console_is_key_pressed.restype = c_bool
# background rendering modes
BKGND_NONE = 0
BKGND_SET = 1
BKGND_MULTIPLY = 2
BKGND_LIGHTEN = 3
BKGND_DARKEN = 4
BKGND_SCREEN = 5
BKGND_COLOR_DODGE = 6
BKGND_COLOR_BURN = 7
BKGND_ADD = 8
BKGND_ADDA = 9
BKGND_BURN = 10
BKGND_OVERLAY = 11
BKGND_ALPH = 12
BKGND_DEFAULT=13
def BKGND_ALPHA(a):
return BKGND_ALPH | (int(a * 255) << 8)
def BKGND_ADDALPHA(a):
return BKGND_ADDA | (int(a * 255) << 8)
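# For example (illustrative), BKGND_ALPHA(0.5) encodes a 50% alpha blend in
# the high bits of the flag; the result can be passed wherever a background
# flag is expected, e.g. console_set_char_background(con, x, y, col, BKGND_ALPHA(0.5)).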
# non blocking key events types
KEY_PRESSED = 1
KEY_RELEASED = 2
# key codes
KEY_NONE = 0
KEY_ESCAPE = 1
KEY_BACKSPACE = 2
KEY_TAB = 3
KEY_ENTER = 4
KEY_SHIFT = 5
KEY_CONTROL = 6
KEY_ALT = 7
KEY_PAUSE = 8
KEY_CAPSLOCK = 9
KEY_PAGEUP = 10
KEY_PAGEDOWN = 11
KEY_END = 12
KEY_HOME = 13
KEY_UP = 14
KEY_LEFT = 15
KEY_RIGHT = 16
KEY_DOWN = 17
KEY_PRINTSCREEN = 18
KEY_INSERT = 19
KEY_DELETE = 20
KEY_LWIN = 21
KEY_RWIN = 22
KEY_APPS = 23
KEY_0 = 24
KEY_1 = 25
KEY_2 = 26
KEY_3 = 27
KEY_4 = 28
KEY_5 = 29
KEY_6 = 30
KEY_7 = 31
KEY_8 = 32
KEY_9 = 33
KEY_KP0 = 34
KEY_KP1 = 35
KEY_KP2 = 36
KEY_KP3 = 37
KEY_KP4 = 38
KEY_KP5 = 39
KEY_KP6 = 40
KEY_KP7 = 41
KEY_KP8 = 42
KEY_KP9 = 43
KEY_KPADD = 44
KEY_KPSUB = 45
KEY_KPDIV = 46
KEY_KPMUL = 47
KEY_KPDEC = 48
KEY_KPENTER = 49
KEY_F1 = 50
KEY_F2 = 51
KEY_F3 = 52
KEY_F4 = 53
KEY_F5 = 54
KEY_F6 = 55
KEY_F7 = 56
KEY_F8 = 57
KEY_F9 = 58
KEY_F10 = 59
KEY_F11 = 60
KEY_F12 = 61
KEY_NUMLOCK = 62
KEY_SCROLLLOCK = 63
KEY_SPACE = 64
KEY_CHAR = 65
KEY_TEXT = 66
# special chars
# single walls
CHAR_HLINE = 196
CHAR_VLINE = 179
CHAR_NE = 191
CHAR_NW = 218
CHAR_SE = 217
CHAR_SW = 192
CHAR_TEEW = 180
CHAR_TEEE = 195
CHAR_TEEN = 193
CHAR_TEES = 194
CHAR_CROSS = 197
# double walls
CHAR_DHLINE = 205
CHAR_DVLINE = 186
CHAR_DNE = 187
CHAR_DNW = 201
CHAR_DSE = 188
CHAR_DSW = 200
CHAR_DTEEW = 185
CHAR_DTEEE = 204
CHAR_DTEEN = 202
CHAR_DTEES = 203
CHAR_DCROSS = 206
# blocks
CHAR_BLOCK1 = 176
CHAR_BLOCK2 = 177
CHAR_BLOCK3 = 178
# arrows
CHAR_ARROW_N = 24
CHAR_ARROW_S = 25
CHAR_ARROW_E = 26
CHAR_ARROW_W = 27
# arrows without tail
CHAR_ARROW2_N = 30
CHAR_ARROW2_S = 31
CHAR_ARROW2_E = 16
CHAR_ARROW2_W = 17
# double arrows
CHAR_DARROW_H = 29
CHAR_DARROW_V = 18
# GUI stuff
CHAR_CHECKBOX_UNSET = 224
CHAR_CHECKBOX_SET = 225
CHAR_RADIO_UNSET = 9
CHAR_RADIO_SET = 10
# sub-pixel resolution kit
CHAR_SUBP_NW = 226
CHAR_SUBP_NE = 227
CHAR_SUBP_N = 228
CHAR_SUBP_SE = 229
CHAR_SUBP_DIAG = 230
CHAR_SUBP_E = 231
CHAR_SUBP_SW = 232
# misc characters
CHAR_BULLET = 7
CHAR_BULLET_INV = 8
CHAR_BULLET_SQUARE = 254
CHAR_CENT = 189
CHAR_CLUB = 5
CHAR_COPYRIGHT = 184
CHAR_CURRENCY = 207
CHAR_DIAMOND = 4
CHAR_DIVISION = 246
CHAR_EXCLAM_DOUBLE = 19
CHAR_FEMALE = 12
CHAR_FUNCTION = 159
CHAR_GRADE = 248
CHAR_HALF = 171
CHAR_HEART = 3
CHAR_LIGHT = 15
CHAR_MALE = 11
CHAR_MULTIPLICATION = 158
CHAR_NOTE = 13
CHAR_NOTE_DOUBLE = 14
CHAR_ONE_QUARTER = 172
CHAR_PILCROW = 20
CHAR_POUND = 156
CHAR_POW1 = 251
CHAR_POW2 = 253
CHAR_POW3 = 252
CHAR_RESERVED = 169
CHAR_SECTION = 21
CHAR_SMILIE = 1
CHAR_SMILIE_INV = 2
CHAR_SPADE = 6
CHAR_THREE_QUARTERS = 243
CHAR_UMLAUT = 249
CHAR_YEN = 190
# font flags
FONT_LAYOUT_ASCII_INCOL = 1
FONT_LAYOUT_ASCII_INROW = 2
FONT_TYPE_GREYSCALE = 4
FONT_TYPE_GRAYSCALE = 4
FONT_LAYOUT_TCOD = 8
# color control codes
COLCTRL_1=1
COLCTRL_2=2
COLCTRL_3=3
COLCTRL_4=4
COLCTRL_5=5
COLCTRL_NUMBER=5
COLCTRL_FORE_RGB=6
COLCTRL_BACK_RGB=7
COLCTRL_STOP=8
# renderers
RENDERER_GLSL=0
RENDERER_OPENGL=1
RENDERER_SDL=2
NB_RENDERERS=3
# alignment
LEFT=0
RIGHT=1
CENTER=2
# initializing the console
_lib.TCOD_console_init_root.restype=c_void
_lib.TCOD_console_init_root.argtypes=[c_int, c_int, c_char_p , c_bool , c_uint ]
def console_init_root(w, h, title, fullscreen=False, renderer=RENDERER_SDL):
_lib.TCOD_console_init_root(w, h, convert_to_ascii(title), fullscreen, renderer)
_lib.TCOD_console_set_custom_font.restype=c_void
_lib.TCOD_console_set_custom_font.argtypes=[c_char_p, c_int,c_int, c_int]
def console_set_custom_font(fontFile, flags=FONT_LAYOUT_ASCII_INCOL, nb_char_horiz=0, nb_char_vertic=0):
_lib.TCOD_console_set_custom_font(convert_to_ascii(fontFile), flags, nb_char_horiz, nb_char_vertic)
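# Illustrative sketch (an assumption, not part of the original wrapper): a
# minimal program sets the font before creating the root console, then loops
# until the window is closed ('terminal.png' is a hypothetical font file):
#
#     console_set_custom_font('terminal.png', FONT_LAYOUT_ASCII_INCOL)
#     console_init_root(80, 50, 'demo', False, RENDERER_SDL)
#     while not console_is_window_closed():
#         console_print(0, 1, 1, 'Hello')
#         console_flush()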
_lib.TCOD_console_map_ascii_code_to_font.restype=c_void
_lib.TCOD_console_map_ascii_code_to_font.argtypes=[c_int, c_int, c_int]
def console_map_ascii_code_to_font(asciiCode, fontCharX, fontCharY):
asciiCode = convert_to_ascii(asciiCode)
if type(asciiCode) is bytes:
_lib.TCOD_console_map_ascii_code_to_font(ord(asciiCode), fontCharX, fontCharY)
else:
_lib.TCOD_console_map_ascii_code_to_font(asciiCode, fontCharX, fontCharY)
_lib.TCOD_console_map_ascii_codes_to_font.restype=c_void
_lib.TCOD_console_map_ascii_codes_to_font.argtypes=[c_int, c_int, c_int, c_int]
def console_map_ascii_codes_to_font(firstAsciiCode, nbCodes, fontCharX,
fontCharY):
if type(firstAsciiCode) == str or type(firstAsciiCode) == bytes:
_lib.TCOD_console_map_ascii_codes_to_font(ord(firstAsciiCode), nbCodes, fontCharX, fontCharY)
else:
_lib.TCOD_console_map_ascii_codes_to_font(firstAsciiCode, nbCodes, fontCharX, fontCharY)
_lib.TCOD_console_map_string_to_font.argtypes=[c_char_p, c_int, c_int]
_lib.TCOD_console_map_string_to_font_utf.argtypes=[c_wchar_p, c_int, c_int]
def console_map_string_to_font(s, fontCharX, fontCharY):
# Python 3, utf is normal, so if they want utf behaviour call the other function.
if type(s) is bytes or is_python_3:
_lib.TCOD_console_map_string_to_font(convert_to_ascii(s), fontCharX, fontCharY)
else:
_lib.TCOD_console_map_string_to_font_utf(s, fontCharX, fontCharY)
def console_map_string_to_font_utf(s, fontCharX, fontCharY):
_lib.TCOD_console_map_string_to_font_utf(s, fontCharX, fontCharY)
_lib.TCOD_console_is_fullscreen.restype=c_bool
_lib.TCOD_console_is_fullscreen.argtypes=[]
def console_is_fullscreen():
return _lib.TCOD_console_is_fullscreen()
_lib.TCOD_console_set_fullscreen.restype=c_void
_lib.TCOD_console_set_fullscreen.argtypes=[c_bool ]
def console_set_fullscreen(fullscreen):
_lib.TCOD_console_set_fullscreen(c_int(fullscreen))
_lib.TCOD_console_is_window_closed.restype=c_bool
_lib.TCOD_console_is_window_closed.argtypes=[]
def console_is_window_closed():
return _lib.TCOD_console_is_window_closed()
_lib.TCOD_console_has_mouse_focus.restype=c_bool
_lib.TCOD_console_has_mouse_focus.argtypes=[]
def console_has_mouse_focus():
return _lib.TCOD_console_has_mouse_focus()
_lib.TCOD_console_is_active.restype=c_bool
_lib.TCOD_console_is_active.argtypes=[]
def console_is_active():
return _lib.TCOD_console_is_active()
_lib.TCOD_console_set_window_title.restype=c_void
_lib.TCOD_console_set_window_title.argtypes=[c_char_p]
def console_set_window_title(title):
_lib.TCOD_console_set_window_title(convert_to_ascii(title))
_lib.TCOD_console_credits_render.restype = c_bool
def console_credits():
_lib.TCOD_console_credits()
_lib.TCOD_console_credits_reset.restype=c_void
_lib.TCOD_console_credits_reset.argtypes=[]
def console_credits_reset():
_lib.TCOD_console_credits_reset()
_lib.TCOD_console_credits_render.restype=c_bool
_lib.TCOD_console_credits_render.argtypes=[c_int, c_int, c_bool ]
def console_credits_render(x, y, alpha):
return _lib.TCOD_console_credits_render(x, y, c_int(alpha))
_lib.TCOD_console_flush.restype=c_void
_lib.TCOD_console_flush.argtypes=[]
def console_flush():
_lib.TCOD_console_flush()
# drawing on a console
_lib.TCOD_console_set_default_background.restype=c_void
_lib.TCOD_console_set_default_background.argtypes=[c_void_p ,Color ]
def console_set_default_background(con, col):
_lib.TCOD_console_set_default_background(con, col)
_lib.TCOD_console_set_default_foreground.restype=c_void
_lib.TCOD_console_set_default_foreground.argtypes=[c_void_p ,Color ]
def console_set_default_foreground(con, col):
_lib.TCOD_console_set_default_foreground(con, col)
_lib.TCOD_console_clear.restype=c_void
_lib.TCOD_console_clear.argtypes=[c_void_p ]
def console_clear(con):
return _lib.TCOD_console_clear(con)
_lib.TCOD_console_put_char.restype=c_void
_lib.TCOD_console_put_char.argtypes=[c_void_p ,c_int, c_int, c_int, c_int]
def console_put_char(con, x, y, c, flag=BKGND_DEFAULT):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_put_char(c_void_p(con), x, y, ord(c), flag)
else:
_lib.TCOD_console_put_char(c_void_p(con), x, y, c, flag)
_lib.TCOD_console_put_char_ex.restype=c_void
_lib.TCOD_console_put_char_ex.argtypes=[c_void_p ,c_int, c_int, c_int, Color, Color]
def console_put_char_ex(con, x, y, c, fore, back):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_put_char_ex(c_void_p(con), x, y, ord(c), fore, back)
else:
_lib.TCOD_console_put_char_ex(c_void_p(con), x, y, c, fore, back)
_lib.TCOD_console_set_char_background.restype=c_void
_lib.TCOD_console_set_char_background.argtypes=[c_void_p ,c_int, c_int, Color , c_int ]
def console_set_char_background(con, x, y, col, flag=BKGND_SET):
_lib.TCOD_console_set_char_background(con, x, y, col, flag)
_lib.TCOD_console_set_char_foreground.restype=c_void
_lib.TCOD_console_set_char_foreground.argtypes=[c_void_p ,c_int, c_int, Color ]
def console_set_char_foreground(con, x, y, col):
_lib.TCOD_console_set_char_foreground(con, x, y, col)
_lib.TCOD_console_set_char.restype=c_void
_lib.TCOD_console_set_char.argtypes=[c_void_p ,c_int, c_int, c_int]
def console_set_char(con, x, y, c):
if type(c) == str or type(c) == bytes:
_lib.TCOD_console_set_char(con, x, y, ord(c))
else:
_lib.TCOD_console_set_char(con, x, y, c)
_lib.TCOD_console_set_background_flag.restype=c_void
_lib.TCOD_console_set_background_flag.argtypes=[c_void_p ,c_int ]
def console_set_background_flag(con, flag):
_lib.TCOD_console_set_background_flag(con, flag)
_lib.TCOD_console_get_background_flag.restype=c_int
_lib.TCOD_console_get_background_flag.argtypes=[c_void_p ]
def console_get_background_flag(con):
return _lib.TCOD_console_get_background_flag(con)
_lib.TCOD_console_set_alignment.restype=c_void
_lib.TCOD_console_set_alignment.argtypes=[c_void_p ,c_int ]
def console_set_alignment(con, alignment):
_lib.TCOD_console_set_alignment(con, alignment)
_lib.TCOD_console_get_alignment.restype=c_int
_lib.TCOD_console_get_alignment.argtypes=[c_void_p ]
def console_get_alignment(con):
return _lib.TCOD_console_get_alignment(con)
_lib.TCOD_console_print.argtypes=[c_void_p,c_int,c_int,c_char_p]
def console_print(con, x, y, fmt):
if type(fmt) == bytes or is_python_3:
_lib.TCOD_console_print(con, x, y, convert_to_ascii(fmt))
else:
_lib.TCOD_console_print_utf(con, x, y, fmt)
_lib.TCOD_console_print_ex.argtypes=[c_void_p,c_int,c_int,c_int,c_int,c_char_p]
_lib.TCOD_console_print_ex_utf.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_wchar_p]
def console_print_ex(con, x, y, flag, alignment, fmt):
if type(fmt) == bytes or is_python_3:
_lib.TCOD_console_print_ex(con, x, y, flag, alignment, convert_to_ascii(fmt))
else:
_lib.TCOD_console_print_ex_utf(con, x, y, flag, alignment, fmt)
_lib.TCOD_console_print_rect.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_char_p]
_lib.TCOD_console_print_rect_utf.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_wchar_p]
def console_print_rect(con, x, y, w, h, fmt):
if type(fmt) == bytes or is_python_3:
return _lib.TCOD_console_print_rect(con, x, y, w, h, convert_to_ascii(fmt))
else:
return _lib.TCOD_console_print_rect_utf(con, x, y, w, h, fmt)
_lib.TCOD_console_print_rect_ex.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_int, c_int, c_char_p]
_lib.TCOD_console_print_rect_ex_utf.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_int, c_int, c_wchar_p]
def console_print_rect_ex(con, x, y, w, h, flag, alignment, fmt):
if type(fmt) == bytes or is_python_3:
return _lib.TCOD_console_print_rect_ex(con, x, y, w, h, flag, alignment, convert_to_ascii(fmt))
else:
return _lib.TCOD_console_print_rect_ex_utf(con, x, y, w, h, flag, alignment, fmt)
_lib.TCOD_console_get_height_rect.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_char_p]
_lib.TCOD_console_get_height_rect_utf.argtypes=[c_void_p, c_int, c_int, c_int, c_int, c_wchar_p]
def console_get_height_rect(con, x, y, w, h, fmt):
if type(fmt) == bytes or is_python_3:
return _lib.TCOD_console_get_height_rect(con, x, y, w, h, convert_to_ascii(fmt))
else:
return _lib.TCOD_console_get_height_rect_utf(con, x, y, w, h, fmt)
_lib.TCOD_console_rect.argtypes=[ c_void_p, c_int, c_int, c_int, c_int, c_bool, c_int ]
def console_rect(con, x, y, w, h, clr, flag=BKGND_DEFAULT):
    _lib.TCOD_console_rect(con, x, y, w, h, c_bool(clr), flag)
_lib.TCOD_console_hline.argtypes=[ c_void_p, c_int, c_int, c_int, c_int ]
def console_hline(con, x, y, l, flag=BKGND_DEFAULT):
_lib.TCOD_console_hline( con, x, y, l, flag)
_lib.TCOD_console_vline.argtypes=[ c_void_p, c_int, c_int, c_int, c_int ]
def console_vline(con, x, y, l, flag=BKGND_DEFAULT):
_lib.TCOD_console_vline( con, x, y, l, flag)
_lib.TCOD_console_print_frame.argtypes=[c_void_p,c_int,c_int,c_int,c_int,c_int,c_int,c_char_p]
def console_print_frame(con, x, y, w, h, clear=True, flag=BKGND_DEFAULT, fmt=None):
    # a NULL fmt draws the frame without a title bar
    _lib.TCOD_console_print_frame(con, x, y, w, h, clear, flag,
                                  convert_to_ascii(fmt) if fmt else None)
_lib.TCOD_console_get_foreground_color_image.restype=c_void_p
_lib.TCOD_console_get_foreground_color_image.argtypes=[c_void_p]
def console_get_foreground_image(con):
return _lib.TCOD_console_get_foreground_color_image(con)
_lib.TCOD_console_get_background_color_image.restype=c_void_p
_lib.TCOD_console_get_background_color_image.argtypes=[c_void_p]
def console_get_background_image(con):
return _lib.TCOD_console_get_background_color_image(con)
_lib.TCOD_console_set_color_control.restype=c_void
_lib.TCOD_console_set_color_control.argtypes=[c_void_p, Color, Color ]
def console_set_color_control(con,fore,back) :
_lib.TCOD_console_set_color_control(con,fore,back)
_lib.TCOD_console_get_default_background.restype=Color
_lib.TCOD_console_get_default_background.argtypes=[c_void_p]
def console_get_default_background(con):
return _lib.TCOD_console_get_default_background(con)
_lib.TCOD_console_get_default_foreground.restype=Color
_lib.TCOD_console_get_default_foreground.argtypes=[c_void_p]
def console_get_default_foreground(con):
return _lib.TCOD_console_get_default_foreground(con)
_lib.TCOD_console_get_char_background.restype=Color
_lib.TCOD_console_get_char_background.argtypes=[c_void_p, c_int, c_int]
def console_get_char_background(con, x, y):
return _lib.TCOD_console_get_char_background(con, x, y)
_lib.TCOD_console_get_char_foreground.restype=Color
_lib.TCOD_console_get_char_foreground.argtypes=[c_void_p, c_int, c_int]
def console_get_char_foreground(con, x, y):
return _lib.TCOD_console_get_char_foreground(con, x, y)
_lib.TCOD_console_get_char.restype=c_int
_lib.TCOD_console_get_char.argtypes=[c_void_p, c_int, c_int]
def console_get_char(con, x, y):
return _lib.TCOD_console_get_char(con, x, y)
_lib.TCOD_console_set_fade.restype=c_void
_lib.TCOD_console_set_fade.argtypes=[c_byte, Color]
def console_set_fade(fade, fadingColor):
_lib.TCOD_console_set_fade(fade, fadingColor)
_lib.TCOD_console_get_fade.restype=c_byte
_lib.TCOD_console_get_fade.argtypes=[]
def console_get_fade():
return _lib.TCOD_console_get_fade()
_lib.TCOD_console_get_fading_color.restype=Color
_lib.TCOD_console_get_fading_color.argtypes=[]
def console_get_fading_color():
return _lib.TCOD_console_get_fading_color()
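# Example: printing aligned text on the root console (an illustrative
# sketch, never called from this module). It assumes the root console was
# created earlier with console_init_root, that 0 denotes the root console,
# and that the BKGND_NONE and CENTER constants are defined above.
def _example_draw_text():
    console_set_default_foreground(0, Color(255, 255, 255))
    console_print_ex(0, 40, 1, BKGND_NONE, CENTER, 'Hello, libtcod')
    console_flush()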
# handling keyboard input
def console_wait_for_keypress(flush):
k=Key()
_lib.TCOD_console_wait_for_keypress_wrapper(byref(k),c_bool(flush))
return k
def console_check_for_keypress(flags=KEY_RELEASED):
k=Key()
_lib.TCOD_console_check_for_keypress_wrapper(byref(k),c_int(flags))
return k
_lib.TCOD_console_is_key_pressed.restype=c_bool
_lib.TCOD_console_is_key_pressed.argtypes=[c_int ]
def console_is_key_pressed(key):
return _lib.TCOD_console_is_key_pressed(key)
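# Example: a minimal key-polling loop (an illustrative sketch, never called
# from this module). It assumes console_is_window_closed and the KEY_PRESSED
# and KEY_ESCAPE constants are defined earlier in this file.
def _example_keyboard_loop():
    while not console_is_window_closed():
        key = console_check_for_keypress(KEY_PRESSED)
        if key.vk == KEY_ESCAPE:
            break  # stop polling once Escape is pressed
        console_flush()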
# using offscreen consoles
_lib.TCOD_console_new.restype=c_void_p
_lib.TCOD_console_new.argtypes=[c_int, c_int]
def console_new(w, h):
return _lib.TCOD_console_new(w, h)
_lib.TCOD_console_from_file.restype=c_void_p
_lib.TCOD_console_from_file.argtypes=[c_char_p]
def console_from_file(filename):
return _lib.TCOD_console_from_file(convert_to_ascii(filename))
_lib.TCOD_console_get_width.restype=c_int
_lib.TCOD_console_get_width.argtypes=[c_void_p ]
def console_get_width(con):
return _lib.TCOD_console_get_width(con)
_lib.TCOD_console_get_height.restype=c_int
_lib.TCOD_console_get_height.argtypes=[c_void_p ]
def console_get_height(con):
return _lib.TCOD_console_get_height(con)
_lib.TCOD_console_blit.argtypes=[c_void_p ,c_int, c_int, c_int, c_int, c_void_p , c_int, c_int, c_float, c_float]
def console_blit(src, x, y, w, h, dst, xdst, ydst, ffade=1.0,bfade=1.0):
_lib.TCOD_console_blit(src, x, y, w, h, dst, xdst, ydst, c_float(ffade), c_float(bfade))
_lib.TCOD_console_set_key_color.argtypes=[c_void_p ,Color ]
def console_set_key_color(con, col):
_lib.TCOD_console_set_key_color(c_void_p(con), col)
_lib.TCOD_console_set_dirty.restype=c_void
_lib.TCOD_console_set_dirty.argtypes=[c_int, c_int, c_int, c_int]
def console_set_dirty(x, y, w, h):
return _lib.TCOD_console_set_dirty(x, y, w, h)
_lib.TCOD_console_delete.argtypes=[c_void_p ]
def console_delete(con):
_lib.TCOD_console_delete(con)
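# Example: composing an offscreen console onto the root console (an
# illustrative sketch; 0 denotes the root console, assumed to exist).
def _example_offscreen():
    off = console_new(20, 10)
    console_print(off, 1, 1, 'panel')
    # blit the whole 20x10 panel to (5,5) with 70% background opacity
    console_blit(off, 0, 0, 20, 10, 0, 5, 5, 1.0, 0.7)
    console_delete(off)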
# fast color filling
_lib.TCOD_console_fill_foreground.restype=c_void
_lib.TCOD_console_fill_foreground.argtypes=[c_void_p , POINTER(c_int), POINTER(c_int), POINTER(c_int)]
def console_fill_foreground(con,r,g,b) :
if len(r) != len(g) or len(r) != len(b):
raise TypeError('R, G and B must all have the same size.')
if (numpy_available and isinstance(r, numpy.ndarray) and
isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
#numpy arrays, use numpy's ctypes functions
r = numpy.ascontiguousarray(r, dtype=numpy.int32)
g = numpy.ascontiguousarray(g, dtype=numpy.int32)
b = numpy.ascontiguousarray(b, dtype=numpy.int32)
cr = r.ctypes.data_as(POINTER(c_int))
cg = g.ctypes.data_as(POINTER(c_int))
cb = b.ctypes.data_as(POINTER(c_int))
else:
# otherwise convert using ctypes arrays
cr = (c_int * len(r))(*r)
cg = (c_int * len(g))(*g)
cb = (c_int * len(b))(*b)
_lib.TCOD_console_fill_foreground(c_void_p(con), cr, cg, cb)
_lib.TCOD_console_fill_background.restype=c_void
_lib.TCOD_console_fill_background.argtypes=[c_void_p , POINTER(c_int), POINTER(c_int), POINTER(c_int)]
def console_fill_background(con,r,g,b) :
if len(r) != len(g) or len(r) != len(b):
raise TypeError('R, G and B must all have the same size.')
if (numpy_available and isinstance(r, numpy.ndarray) and
isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
#numpy arrays, use numpy's ctypes functions
r = numpy.ascontiguousarray(r, dtype=numpy.int32)
g = numpy.ascontiguousarray(g, dtype=numpy.int32)
b = numpy.ascontiguousarray(b, dtype=numpy.int32)
cr = r.ctypes.data_as(POINTER(c_int))
cg = g.ctypes.data_as(POINTER(c_int))
cb = b.ctypes.data_as(POINTER(c_int))
else:
# otherwise convert using ctypes arrays
cr = (c_int * len(r))(*r)
cg = (c_int * len(g))(*g)
cb = (c_int * len(b))(*b)
_lib.TCOD_console_fill_background(c_void_p(con), cr, cg, cb)
_lib.TCOD_console_fill_char.restype=c_void
_lib.TCOD_console_fill_char.argtypes=[c_void_p , POINTER(c_int)]
def console_fill_char(con,arr) :
if (numpy_available and isinstance(arr, numpy.ndarray) ):
#numpy arrays, use numpy's ctypes functions
arr = numpy.ascontiguousarray(arr, dtype=numpy.int32)
carr = arr.ctypes.data_as(POINTER(c_int))
else:
carr = (c_int * len(arr))(*arr)
_lib.TCOD_console_fill_char(c_void_p(con), carr)
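# Example: filling a console's colors in bulk (an illustrative sketch).
# It assumes numpy is available; the arrays are flat, row-major, with one
# entry per console cell.
def _example_fill(con, w, h):
    import numpy
    r = numpy.full(w * h, 255, dtype=numpy.int32)  # red component maxed
    g = numpy.zeros(w * h, dtype=numpy.int32)
    b = numpy.zeros(w * h, dtype=numpy.int32)
    console_fill_foreground(con, r, g, b)  # one C call instead of w*h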
_lib.TCOD_console_load_asc.restype=c_bool
_lib.TCOD_console_load_asc.argtypes=[c_void_p , c_char_p]
def console_load_asc(con, filename) :
return _lib.TCOD_console_load_asc(con,convert_to_ascii(filename))
_lib.TCOD_console_save_asc.restype=c_bool
_lib.TCOD_console_save_asc.argtypes=[c_void_p , c_char_p]
def console_save_asc(con, filename) :
return _lib.TCOD_console_save_asc(con,convert_to_ascii(filename))
_lib.TCOD_console_load_apf.restype=c_bool
_lib.TCOD_console_load_apf.argtypes=[c_void_p , c_char_p]
def console_load_apf(con, filename) :
return _lib.TCOD_console_load_apf(con,convert_to_ascii(filename))
_lib.TCOD_console_save_apf.restype=c_bool
_lib.TCOD_console_save_apf.argtypes=[c_void_p , c_char_p]
def console_save_apf(con, filename) :
return _lib.TCOD_console_save_apf(con,convert_to_ascii(filename))
############################
# sys module
############################
_lib.TCOD_sys_startup.restype=c_void
_lib.TCOD_sys_startup.argtypes=[]
def sys_startup():
_lib.TCOD_sys_startup()
_lib.TCOD_sys_shutdown.restype=c_void
_lib.TCOD_sys_shutdown.argtypes=[]
def sys_shutdown():
_lib.TCOD_sys_shutdown()
# high precision time functions
_lib.TCOD_sys_set_fps.restype=c_void
_lib.TCOD_sys_set_fps.argtypes=[c_int]
def sys_set_fps(fps):
_lib.TCOD_sys_set_fps(fps)
_lib.TCOD_sys_get_fps.restype=c_int
_lib.TCOD_sys_get_fps.argtypes=[]
def sys_get_fps():
return _lib.TCOD_sys_get_fps()
_lib.TCOD_sys_get_last_frame_length.restype=c_float
_lib.TCOD_sys_get_last_frame_length.argtypes=[]
def sys_get_last_frame_length():
return _lib.TCOD_sys_get_last_frame_length()
_lib.TCOD_sys_sleep_milli.restype=c_void
_lib.TCOD_sys_sleep_milli.argtypes=[c_uint ]
def sys_sleep_milli(val):
_lib.TCOD_sys_sleep_milli(val)
_lib.TCOD_sys_elapsed_milli.restype=c_int
_lib.TCOD_sys_elapsed_milli.argtypes=[]
def sys_elapsed_milli():
return _lib.TCOD_sys_elapsed_milli()
_lib.TCOD_sys_elapsed_seconds.restype=c_float
_lib.TCOD_sys_elapsed_seconds.argtypes=[]
def sys_elapsed_seconds():
return _lib.TCOD_sys_elapsed_seconds()
_lib.TCOD_sys_set_renderer.restype=c_void
_lib.TCOD_sys_set_renderer.argtypes=[c_int ]
def sys_set_renderer(renderer):
_lib.TCOD_sys_set_renderer(renderer)
_lib.TCOD_sys_get_renderer.restype=c_int
_lib.TCOD_sys_get_renderer.argtypes=[]
def sys_get_renderer():
return _lib.TCOD_sys_get_renderer()
# easy screenshots
_lib.TCOD_sys_save_screenshot.restype=c_void
_lib.TCOD_sys_save_screenshot.argtypes=[c_char_p]
def sys_save_screenshot(name=None):
    # passing NULL lets libtcod auto-number the screenshot file
    _lib.TCOD_sys_save_screenshot(convert_to_ascii(name) if name is not None else None)
# clipboard support
# This maps to the SDL2 API, so only uses utf-8 for both Python 2 and 3.
_lib.TCOD_sys_clipboard_set.restype=c_bool
_lib.TCOD_sys_clipboard_set.argtypes=[c_char_p]
def sys_clipboard_set(text):
return _lib.TCOD_sys_clipboard_set(text.encode("utf-8"))
_lib.TCOD_sys_clipboard_get.restype=c_char_p
_lib.TCOD_sys_clipboard_get.argtypes=[]
def sys_clipboard_get():
return _lib.TCOD_sys_clipboard_get().decode("utf-8")
# custom fullscreen resolution
_lib.TCOD_sys_force_fullscreen_resolution.restype=c_void
_lib.TCOD_sys_force_fullscreen_resolution.argtypes=[c_int, c_int]
def sys_force_fullscreen_resolution(width, height):
_lib.TCOD_sys_force_fullscreen_resolution(width, height)
_lib.TCOD_sys_get_current_resolution.restype=c_void
_lib.TCOD_sys_get_current_resolution.argtypes=[POINTER(c_int), POINTER(c_int)]
def sys_get_current_resolution():
w = c_int()
h = c_int()
_lib.TCOD_sys_get_current_resolution(byref(w), byref(h))
return w.value, h.value
_lib.TCOD_sys_get_fullscreen_offsets.restype=c_void
_lib.TCOD_sys_get_fullscreen_offsets.argtypes=[POINTER(c_int), POINTER(c_int)]
_lib.TCOD_sys_get_char_size.restype=c_void
_lib.TCOD_sys_get_char_size.argtypes=[POINTER(c_int), POINTER(c_int)]
def sys_get_char_size():
w = c_int()
h = c_int()
_lib.TCOD_sys_get_char_size(byref(w), byref(h))
return w.value, h.value
# update font bitmap
_lib.TCOD_sys_update_char.restype=c_void
_lib.TCOD_sys_update_char.argtypes=[c_int, c_int, c_int, c_void_p , c_int, c_int]
def sys_update_char(asciiCode, fontx, fonty, img, x, y) :
_lib.TCOD_sys_update_char(asciiCode,fontx,fonty,img,x,y)
# custom SDL post renderer
SDL_RENDERER_FUNC = CFUNCTYPE(None, c_void_p)
def sys_register_SDL_renderer(callback):
global sdl_renderer_func
sdl_renderer_func = SDL_RENDERER_FUNC(callback)
_lib.TCOD_sys_register_SDL_renderer(sdl_renderer_func)
# events
EVENT_NONE=0
EVENT_KEY_PRESS=1
EVENT_KEY_RELEASE=2
EVENT_KEY=EVENT_KEY_PRESS|EVENT_KEY_RELEASE
EVENT_MOUSE_MOVE=4
EVENT_MOUSE_PRESS=8
EVENT_MOUSE_RELEASE=16
EVENT_MOUSE=EVENT_MOUSE_MOVE|EVENT_MOUSE_PRESS|EVENT_MOUSE_RELEASE
EVENT_ANY=EVENT_KEY|EVENT_MOUSE
_lib.TCOD_sys_check_for_event.restype=c_int
_lib.TCOD_sys_check_for_event.argtypes=[c_int, c_void_p, c_void_p]
def sys_check_for_event(mask,k,m) :
return _lib.TCOD_sys_check_for_event(mask,byref(k),byref(m))
_lib.TCOD_sys_wait_for_event.restype=c_int
_lib.TCOD_sys_wait_for_event.argtypes=[c_int, c_void_p, c_void_p, c_bool ]
def sys_wait_for_event(mask,k,m,flush) :
return _lib.TCOD_sys_wait_for_event(mask,byref(k),byref(m),flush)
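# Example: a combined keyboard/mouse event pump (an illustrative sketch).
# Key is defined earlier in this file and Mouse in the mouse module section
# below; both are filled in place by the wrappers. console_is_window_closed
# is assumed to be defined earlier.
def _example_event_pump():
    key = Key()
    mouse = Mouse()
    while not console_is_window_closed():
        ev = sys_check_for_event(EVENT_ANY, key, mouse)
        if ev & EVENT_KEY_PRESS:
            pass  # inspect key.vk / key.c here
        if ev & EVENT_MOUSE_MOVE:
            pass  # inspect mouse.cx / mouse.cy here
        console_flush()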
############################
# line module
############################
def line_init(xo, yo, xd, yd):
_lib.TCOD_line_init(xo, yo, xd, yd)
_lib.TCOD_line_step.restype = c_bool
_lib.TCOD_line_step.argtypes=[POINTER(c_int), POINTER(c_int)]
def line_step():
x = c_int()
y = c_int()
ret = _lib.TCOD_line_step(byref(x), byref(y))
if not ret:
return x.value, y.value
return None,None
_lib.TCOD_line.restype=c_bool
def line(xo,yo,xd,yd,py_callback) :
LINE_CBK_FUNC=CFUNCTYPE(c_bool,c_int,c_int)
c_callback=LINE_CBK_FUNC(py_callback)
return _lib.TCOD_line(xo,yo,xd,yd,c_callback)
_lib.TCOD_line_init_mt.restype=c_void
_lib.TCOD_line_init_mt.argtypes=[c_int, c_int, c_int, c_int, c_void_p]
_lib.TCOD_line_step_mt.restype = c_bool
_lib.TCOD_line_step_mt.argtypes=[POINTER(c_int), POINTER(c_int), c_void_p]
def line_iter(xo, yo, xd, yd):
data = (c_int * 9)() # struct TCOD_bresenham_data_t
_lib.TCOD_line_init_mt(xo, yo, xd, yd, data)
x = c_int(xo)
y = c_int(yo)
done = False
while not done:
yield x.value, y.value
done = _lib.TCOD_line_step_mt(byref(x), byref(y), data)
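# Example: walking a Bresenham line (an illustrative sketch). line_iter is
# re-entrant because it keeps its own TCOD_bresenham_data_t buffer, unlike
# the shared-state line_init/line_step pair.
def _example_line():
    # collects every cell from (0,0) to (5,3) inclusive
    return [(x, y) for x, y in line_iter(0, 0, 5, 3)]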
############################
# image module
############################
_lib.TCOD_image_new.restype=c_void_p
_lib.TCOD_image_new.argtypes=[c_int, c_int]
def image_new(width, height):
return _lib.TCOD_image_new(width, height)
_lib.TCOD_image_clear.restype=c_void
_lib.TCOD_image_clear.argtypes=[c_void_p , Color ]
def image_clear(image,col) :
_lib.TCOD_image_clear(image,col)
_lib.TCOD_image_invert.restype=c_void
_lib.TCOD_image_invert.argtypes=[c_void_p ]
def image_invert(image) :
_lib.TCOD_image_invert(image)
_lib.TCOD_image_hflip.restype=c_void
_lib.TCOD_image_hflip.argtypes=[c_void_p ]
def image_hflip(image) :
_lib.TCOD_image_hflip(image)
_lib.TCOD_image_rotate90.restype=c_void
_lib.TCOD_image_rotate90.argtypes=[c_void_p , c_int]
def image_rotate90(image, num=1) :
_lib.TCOD_image_rotate90(image,num)
_lib.TCOD_image_vflip.restype=c_void
_lib.TCOD_image_vflip.argtypes=[c_void_p ]
def image_vflip(image) :
_lib.TCOD_image_vflip(image)
_lib.TCOD_image_scale.restype=c_void
_lib.TCOD_image_scale.argtypes=[c_void_p , c_int, c_int]
def image_scale(image, neww, newh) :
_lib.TCOD_image_scale(image,neww,newh)
_lib.TCOD_image_set_key_color.restype=c_void
_lib.TCOD_image_set_key_color.argtypes=[c_void_p , Color]
def image_set_key_color(image,col) :
_lib.TCOD_image_set_key_color(image,col)
_lib.TCOD_image_get_alpha.restype=c_int
_lib.TCOD_image_get_alpha.argtypes=[c_void_p ,c_int, c_int]
def image_get_alpha(image,x,y) :
return _lib.TCOD_image_get_alpha(image,c_int(x),c_int(y))
_lib.TCOD_image_is_pixel_transparent.restype = c_bool
_lib.TCOD_image_is_pixel_transparent.argtypes=[c_void_p , c_int, c_int]
def image_is_pixel_transparent(image,x,y) :
return _lib.TCOD_image_is_pixel_transparent(image,c_int(x),c_int(y))
_lib.TCOD_image_load.restype=c_void_p
_lib.TCOD_image_load.argtypes=[c_char_p]
def image_load(filename):
return _lib.TCOD_image_load(convert_to_ascii(filename))
_lib.TCOD_image_from_console.restype=c_void_p
_lib.TCOD_image_from_console.argtypes=[c_void_p ]
def image_from_console(console):
return _lib.TCOD_image_from_console(console)
_lib.TCOD_image_refresh_console.restype=c_void
_lib.TCOD_image_refresh_console.argtypes=[c_void_p , c_void_p ]
def image_refresh_console(image, console):
_lib.TCOD_image_refresh_console(image, console)
_lib.TCOD_image_get_size.restype=c_void
_lib.TCOD_image_get_size.argtypes=[c_void_p , POINTER(c_int),POINTER(c_int)]
def image_get_size(image):
w=c_int()
h=c_int()
_lib.TCOD_image_get_size(image, byref(w), byref(h))
return w.value, h.value
_lib.TCOD_image_get_pixel.restype = Color
_lib.TCOD_image_get_pixel.argtypes=[c_void_p ,c_int, c_int]
def image_get_pixel(image, x, y):
return _lib.TCOD_image_get_pixel(image, x, y)
_lib.TCOD_image_get_mipmap_pixel.restype = Color
_lib.TCOD_image_get_mipmap_pixel.argtypes=[c_void_p ,c_float,c_float, c_float, c_float]
def image_get_mipmap_pixel(image, x0, y0, x1, y1):
return _lib.TCOD_image_get_mipmap_pixel(image, c_float(x0), c_float(y0), c_float(x1), c_float(y1))
_lib.TCOD_image_put_pixel.restype=c_void
_lib.TCOD_image_put_pixel.argtypes=[ c_void_p ,c_int, c_int, Color ]
def image_put_pixel(image, x, y, col):
_lib.TCOD_image_put_pixel(image, x, y, col)
_lib.TCOD_image_blit.restype=c_void
_lib.TCOD_image_blit.argtypes=[c_void_p, c_void_p, c_float, c_float, c_int, c_float, c_float, c_float]
def image_blit(image, console, x, y, bkgnd_flag, scalex, scaley, angle):
_lib.TCOD_image_blit(image, console, x, y, bkgnd_flag, scalex, scaley, angle)
_lib.TCOD_image_blit_rect.restype=c_void
_lib.TCOD_image_blit_rect.argtypes=[c_void_p , c_void_p , c_int, c_int, c_int, c_int,]
def image_blit_rect(image, console, x, y, w, h, bkgnd_flag):
_lib.TCOD_image_blit_rect(image, console, x, y, w, h, bkgnd_flag)
_lib.TCOD_image_blit_2x.restype=c_void
_lib.TCOD_image_blit_2x.argtypes=[c_void_p , c_void_p , c_int, c_int, c_int, c_int, c_int, c_int]
def image_blit_2x(image, console, dx, dy, sx=0, sy=0, w=-1, h=-1):
_lib.TCOD_image_blit_2x(image, console, dx,dy,sx,sy,w,h)
_lib.TCOD_image_save.restype=c_void
_lib.TCOD_image_save.argtypes=[c_void_p, c_char_p]
def image_save(image, filename):
_lib.TCOD_image_save(image, convert_to_ascii(filename))
_lib.TCOD_image_delete.restype=c_void
_lib.TCOD_image_delete.argtypes=[c_void_p]
def image_delete(image):
_lib.TCOD_image_delete(image)
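# Example: loading a bitmap and blitting it with subcell resolution (an
# illustrative sketch; the file name is hypothetical and Color is the
# structure defined earlier in this file).
def _example_image(con):
    img = image_load('sprite.png')  # hypothetical file
    image_set_key_color(img, Color(0, 0, 0))  # treat black as transparent
    image_blit_2x(img, con, 0, 0)  # each console cell holds 2x2 pixels
    image_delete(img)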
############################
# mouse module
############################
class Mouse(Structure):
_fields_=[('x', c_int),
('y', c_int),
('dx', c_int),
('dy', c_int),
('cx', c_int),
('cy', c_int),
('dcx', c_int),
('dcy', c_int),
('lbutton', c_bool),
('rbutton', c_bool),
('mbutton', c_bool),
('lbutton_pressed', c_bool),
('rbutton_pressed', c_bool),
('mbutton_pressed', c_bool),
('wheel_up', c_bool),
('wheel_down', c_bool),
]
_lib.TCOD_mouse_show_cursor.restype=c_void
_lib.TCOD_mouse_show_cursor.argtypes=[c_bool ]
def mouse_show_cursor(visible):
    _lib.TCOD_mouse_show_cursor(c_bool(visible))
_lib.TCOD_mouse_is_cursor_visible.restype=c_bool
_lib.TCOD_mouse_is_cursor_visible.argtypes=[]
def mouse_is_cursor_visible():
return _lib.TCOD_mouse_is_cursor_visible()
_lib.TCOD_mouse_move.restype=c_void
_lib.TCOD_mouse_move.argtypes=[c_int, c_int]
def mouse_move(x, y):
_lib.TCOD_mouse_move(x, y)
_lib.TCOD_mouse_get_status_wrapper.restype=c_void
_lib.TCOD_mouse_get_status_wrapper.argtypes=[c_void_p]
def mouse_get_status():
mouse=Mouse()
_lib.TCOD_mouse_get_status_wrapper(byref(mouse))
return mouse
############################
# parser module
############################
class Dice(Structure):
_fields_=[('nb_dices', c_int),
('nb_faces', c_int),
('multiplier', c_float),
('addsub', c_float),
]
def __repr__(self):
return "Dice(%d, %d, %s, %s)" % (self.nb_dices, self.nb_faces,
self.multiplier, self.addsub)
class _CValue(Union):
_fields_=[('c',c_uint8),
('i',c_int),
('f',c_float),
('s',c_char_p),
# JBR03192012 See http://bugs.python.org/issue14354 for why these are not defined as their actual types
('col',c_uint8 * 3),
('dice',c_int * 4),
('custom',c_void_p),
]
_CFUNC_NEW_STRUCT = CFUNCTYPE(c_uint, c_void_p, c_char_p)
_CFUNC_NEW_FLAG = CFUNCTYPE(c_uint, c_char_p)
_CFUNC_NEW_PROPERTY = CFUNCTYPE(c_uint, c_char_p, c_int, _CValue)
class _CParserListener(Structure):
_fields_=[('new_struct', _CFUNC_NEW_STRUCT),
('new_flag',_CFUNC_NEW_FLAG),
('new_property',_CFUNC_NEW_PROPERTY),
('end_struct',_CFUNC_NEW_STRUCT),
('error',_CFUNC_NEW_FLAG),
]
# property types
TYPE_NONE = 0
TYPE_BOOL = 1
TYPE_CHAR = 2
TYPE_INT = 3
TYPE_FLOAT = 4
TYPE_STRING = 5
TYPE_COLOR = 6
TYPE_DICE = 7
TYPE_VALUELIST00 = 8
TYPE_VALUELIST01 = 9
TYPE_VALUELIST02 = 10
TYPE_VALUELIST03 = 11
TYPE_VALUELIST04 = 12
TYPE_VALUELIST05 = 13
TYPE_VALUELIST06 = 14
TYPE_VALUELIST07 = 15
TYPE_VALUELIST08 = 16
TYPE_VALUELIST09 = 17
TYPE_VALUELIST10 = 18
TYPE_VALUELIST11 = 19
TYPE_VALUELIST12 = 20
TYPE_VALUELIST13 = 21
TYPE_VALUELIST14 = 22
TYPE_VALUELIST15 = 23
TYPE_LIST = 1024
_lib.TCOD_list_get.restype = c_void_p
def _convert_TCODList(clist, typ):
res = list()
for i in range(_lib.TCOD_list_size(c_void_p(clist))):
elt = _lib.TCOD_list_get(c_void_p(clist), i)
elt = cast(elt, c_void_p)
if typ == TYPE_BOOL:
elt = c_bool.from_buffer(elt).value
elif typ == TYPE_CHAR:
elt = c_char.from_buffer(elt).value
elif typ == TYPE_INT:
elt = c_int.from_buffer(elt).value
elif typ == TYPE_FLOAT:
elt = c_float.from_buffer(elt).value
elif typ == TYPE_STRING or TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
elt = cast(elt, c_char_p).value
elif typ == TYPE_COLOR:
elt = Color.from_buffer_copy(elt)
elif typ == TYPE_DICE:
# doesn't work
elt = Dice.from_buffer_copy(elt)
res.append(elt)
return res
_lib.TCOD_parser_new.restype=c_void_p
_lib.TCOD_parser_new.argtypes=[]
def parser_new():
return _lib.TCOD_parser_new()
_lib.TCOD_parser_new_struct.restype=c_void_p
_lib.TCOD_parser_new_struct.argtypes=[c_void_p , c_char_p]
def parser_new_struct(parser, name):
return _lib.TCOD_parser_new_struct(parser, convert_to_ascii(name))
_lib.TCOD_struct_add_flag.restype=c_void
_lib.TCOD_struct_add_flag.argtypes=[c_void_p ,c_char_p]
def struct_add_flag(struct, name):
_lib.TCOD_struct_add_flag(struct, convert_to_ascii(name))
_lib.TCOD_struct_add_property.restype=c_void
_lib.TCOD_struct_add_property.argtypes=[c_void_p , c_char_p,c_int , c_bool ]
def struct_add_property(struct, name, typ, mandatory):
_lib.TCOD_struct_add_property(struct, convert_to_ascii(name), typ, mandatory)
_lib.TCOD_struct_add_value_list.restype=c_void
_lib.TCOD_struct_add_value_list.argtypes=[c_void_p ,c_char_p, POINTER(c_char_p), c_bool ]
def struct_add_value_list(struct, name, value_list, mandatory):
CARRAY = c_char_p * (len(value_list) + 1)
cvalue_list = CARRAY()
for i in range(len(value_list)):
cvalue_list[i] = cast(convert_to_ascii(value_list[i]), c_char_p)
cvalue_list[len(value_list)] = 0
_lib.TCOD_struct_add_value_list(struct, convert_to_ascii(name), cvalue_list, mandatory)
_lib.TCOD_struct_add_value_list_sized.restype=c_void
_lib.TCOD_struct_add_value_list_sized.argtypes=[c_void_p ,c_char_p, POINTER(c_char_p), c_int, c_bool ]
_lib.TCOD_struct_add_list_property.restype=c_void
_lib.TCOD_struct_add_list_property.argtypes=[c_void_p , c_char_p,c_int , c_bool ]
def struct_add_list_property(struct, name, typ, mandatory):
_lib.TCOD_struct_add_list_property(struct, convert_to_ascii(name), typ, mandatory)
_lib.TCOD_struct_add_structure.restype=c_void
_lib.TCOD_struct_add_structure.argtypes=[c_void_p ,c_void_p]
def struct_add_structure(struct, sub_struct):
_lib.TCOD_struct_add_structure(struct, sub_struct)
_lib.TCOD_struct_get_name.restype=c_char_p
_lib.TCOD_struct_get_name.argtypes=[c_void_p ]
def struct_get_name(struct):
ret = _lib.TCOD_struct_get_name(struct)
if is_python_3:
return ret.decode("utf-8")
return ret
_lib.TCOD_struct_is_mandatory.restype=c_bool
_lib.TCOD_struct_is_mandatory.argtypes=[c_void_p ,c_char_p]
def struct_is_mandatory(struct, name):
return _lib.TCOD_struct_is_mandatory(struct, convert_to_ascii(name))
_lib.TCOD_struct_get_type.restype=c_int
_lib.TCOD_struct_get_type.argtypes=[c_void_p , c_char_p]
def struct_get_type(struct, name):
return _lib.TCOD_struct_get_type(struct, convert_to_ascii(name))
_lib.TCOD_parser_run.restype=c_void
_lib.TCOD_parser_run.argtypes=[c_void_p , c_char_p, c_void_p]
def parser_run(parser, filename, listener=0):
if listener != 0:
clistener=_CParserListener()
def value_converter(name, typ, value):
if typ == TYPE_BOOL:
return listener.new_property(name, typ, value.c == 1)
elif typ == TYPE_CHAR:
return listener.new_property(name, typ, '%c' % (value.c & 0xFF))
elif typ == TYPE_INT:
return listener.new_property(name, typ, value.i)
elif typ == TYPE_FLOAT:
return listener.new_property(name, typ, value.f)
elif typ == TYPE_STRING or \
TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
return listener.new_property(name, typ, value.s)
elif typ == TYPE_COLOR:
col = cast(value.col, POINTER(Color)).contents
return listener.new_property(name, typ, col)
elif typ == TYPE_DICE:
dice = cast(value.dice, POINTER(Dice)).contents
return listener.new_property(name, typ, dice)
elif typ & TYPE_LIST:
return listener.new_property(name, typ,
_convert_TCODList(value.custom, typ & 0xFF))
return True
clistener.new_struct = _CFUNC_NEW_STRUCT(listener.new_struct)
clistener.new_flag = _CFUNC_NEW_FLAG(listener.new_flag)
clistener.new_property = _CFUNC_NEW_PROPERTY(value_converter)
clistener.end_struct = _CFUNC_NEW_STRUCT(listener.end_struct)
clistener.error = _CFUNC_NEW_FLAG(listener.error)
_lib.TCOD_parser_run(parser, convert_to_ascii(filename), byref(clistener))
else:
_lib.TCOD_parser_run(parser, convert_to_ascii(filename), 0)
_lib.TCOD_parser_delete.restype=c_void
_lib.TCOD_parser_delete.argtypes=[c_void_p ]
def parser_delete(parser):
_lib.TCOD_parser_delete(parser)
_lib.TCOD_parser_has_property.restype = c_bool
_lib.TCOD_parser_has_property.argtypes=[c_void_p, c_char_p]
def parser_has_property(parser, name):
return _lib.TCOD_parser_has_property(parser, convert_to_ascii(name))
_lib.TCOD_parser_get_bool_property.restype=c_bool
_lib.TCOD_parser_get_bool_property.argtypes=[c_void_p , c_char_p]
def parser_get_bool_property(parser, name):
return _lib.TCOD_parser_get_bool_property(parser, convert_to_ascii(name))
_lib.TCOD_parser_get_int_property.restype=c_int
_lib.TCOD_parser_get_int_property.argtypes=[c_void_p , c_char_p]
def parser_get_int_property(parser, name):
return _lib.TCOD_parser_get_int_property(parser, convert_to_ascii(name))
_lib.TCOD_parser_get_char_property.restype=c_int
_lib.TCOD_parser_get_char_property.argtypes=[c_void_p , c_char_p]
def parser_get_char_property(parser, name):
return '%c' % _lib.TCOD_parser_get_char_property(parser, convert_to_ascii(name))
_lib.TCOD_parser_get_float_property.restype=c_float
_lib.TCOD_parser_get_float_property.argtypes=[c_void_p , c_char_p]
def parser_get_float_property(parser, name):
return _lib.TCOD_parser_get_float_property(parser, convert_to_ascii(name))
_lib.TCOD_parser_get_string_property.restype=c_char_p
_lib.TCOD_parser_get_string_property.argtypes=[c_void_p , c_char_p]
def parser_get_string_property(parser, name):
ret = _lib.TCOD_parser_get_string_property(parser, convert_to_ascii(name))
if is_python_3:
return ret.decode("utf-8")
return ret
_lib.TCOD_parser_get_color_property.restype = Color
_lib.TCOD_parser_get_color_property.argtypes=[c_void_p , c_char_p]
def parser_get_color_property(parser, name):
return _lib.TCOD_parser_get_color_property(parser, convert_to_ascii(name))
_lib.TCOD_parser_get_dice_property_py.argtypes=[c_void_p,c_char_p,POINTER(Dice)]
def parser_get_dice_property(parser, name):
d = Dice()
_lib.TCOD_parser_get_dice_property_py(parser, convert_to_ascii(name), byref(d))
return d
_lib.TCOD_parser_get_list_property.restype=c_void_p
_lib.TCOD_parser_get_list_property.argtypes=[c_void_p , c_char_p, c_int ]
def parser_get_list_property(parser, name, typ):
clist = _lib.TCOD_parser_get_list_property(parser, convert_to_ascii(name), typ)
return _convert_TCODList(clist, typ)
_lib.TCOD_parser_get_custom_property.restype=c_void_p
_lib.TCOD_parser_get_custom_property.argtypes=[c_void_p , c_char_p]
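# Example: declaring and reading a small config structure with the default
# listener (an illustrative sketch; 'config.txt' and its contents are
# hypothetical). Properties are fetched by their fully qualified
# "struct.property" name.
def _example_parser():
    parser = parser_new()
    st = parser_new_struct(parser, 'actor')
    struct_add_property(st, 'hp', TYPE_INT, True)
    struct_add_property(st, 'name', TYPE_STRING, True)
    parser_run(parser, 'config.txt')  # default listener stores the values
    hp = parser_get_int_property(parser, 'actor.hp')
    parser_delete(parser)
    return hp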
############################
# random module
############################
RNG_MT = 0
RNG_CMWC = 1
DISTRIBUTION_LINEAR = 0
DISTRIBUTION_GAUSSIAN = 1
DISTRIBUTION_GAUSSIAN_RANGE = 2
DISTRIBUTION_GAUSSIAN_INVERSE = 3
DISTRIBUTION_GAUSSIAN_RANGE_INVERSE = 4
_lib.TCOD_random_get_instance.restype=c_void_p
_lib.TCOD_random_get_instance.argtypes=[]
def random_get_instance():
return _lib.TCOD_random_get_instance()
_lib.TCOD_random_new.restype=c_void_p
_lib.TCOD_random_new.argtypes=[c_int ]
def random_new(algo=RNG_CMWC):
return _lib.TCOD_random_new(algo)
_lib.TCOD_random_new_from_seed.restype=c_void_p
_lib.TCOD_random_new_from_seed.argtypes=[c_int, c_uint]
def random_new_from_seed(seed, algo=RNG_CMWC):
return _lib.TCOD_random_new_from_seed(algo, seed)
_lib.TCOD_random_set_distribution.restype=c_void
_lib.TCOD_random_set_distribution.argtypes=[c_void_p , c_int ]
def random_set_distribution(rnd, dist) :
_lib.TCOD_random_set_distribution(rnd, dist)
_lib.TCOD_random_get_int.restype=c_int
_lib.TCOD_random_get_int.argtypes=[c_void_p , c_int, c_int]
def random_get_int(rnd, mi, ma):
return _lib.TCOD_random_get_int(rnd, mi, ma)
_lib.TCOD_random_get_float.restype=c_float
_lib.TCOD_random_get_float.argtypes=[c_void_p , c_float , c_float ]
def random_get_float(rnd, mi, ma):
return _lib.TCOD_random_get_float(rnd, mi, ma)
_lib.TCOD_random_get_double.restype=c_double
_lib.TCOD_random_get_double.argtypes=[c_void_p , c_double , c_double ]
def random_get_double(rnd, mi, ma):
return _lib.TCOD_random_get_double(rnd, mi, ma)
_lib.TCOD_random_get_int_mean.restype=c_int
_lib.TCOD_random_get_int_mean.argtypes=[c_void_p , c_int, c_int, c_int]
def random_get_int_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_int_mean(rnd, mi, ma, mean)
_lib.TCOD_random_get_float_mean.restype=c_float
_lib.TCOD_random_get_float_mean.argtypes=[c_void_p , c_float , c_float , c_float ]
def random_get_float_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_float_mean(rnd, mi, ma, mean)
_lib.TCOD_random_get_double_mean.restype=c_double
_lib.TCOD_random_get_double_mean.argtypes=[c_void_p , c_double , c_double , c_double ]
def random_get_double_mean(rnd, mi, ma, mean):
return _lib.TCOD_random_get_double_mean(rnd, mi, ma, mean)
_lib.TCOD_random_dice_roll_s.restype=c_int
_lib.TCOD_random_dice_roll_s.argtypes=[c_void_p , c_char_p ]
def random_dice_roll_s(rnd, s):
return _lib.TCOD_random_dice_roll_s(rnd, convert_to_ascii(s))
_lib.TCOD_random_save.restype=c_void_p
_lib.TCOD_random_save.argtypes=[c_void_p ]
def random_save(rnd):
return _lib.TCOD_random_save(rnd)
_lib.TCOD_random_restore.restype=c_void
_lib.TCOD_random_restore.argtypes=[c_void_p , c_void_p ]
def random_restore(rnd, backup):
_lib.TCOD_random_restore(rnd, backup)
_lib.TCOD_random_delete.restype=c_void
_lib.TCOD_random_delete.argtypes=[c_void_p ]
def random_delete(rnd):
_lib.TCOD_random_delete(rnd)
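# Example: a reproducible random stream (an illustrative sketch).
def _example_random():
    rnd = random_new_from_seed(0xdeadbeef)
    roll = random_dice_roll_s(rnd, '3d6+2')  # parse-and-roll dice notation
    value = random_get_int(rnd, 1, 100)      # uniform integer in [1, 100]
    random_delete(rnd)
    return roll, value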
############################
# noise module
############################
NOISE_DEFAULT_HURST = 0.5
NOISE_DEFAULT_LACUNARITY = 2.0
NOISE_DEFAULT = 0
NOISE_PERLIN = 1
NOISE_SIMPLEX = 2
NOISE_WAVELET = 4
_NOISE_PACKER_FUNC = (None,
(c_float * 1),
(c_float * 2),
(c_float * 3),
(c_float * 4),
)
_lib.TCOD_noise_new.restype=c_void_p
_lib.TCOD_noise_new.argtypes=[c_int, c_float , c_float , c_void_p ]
def noise_new(dim, h=NOISE_DEFAULT_HURST, l=NOISE_DEFAULT_LACUNARITY, random=0):
return _lib.TCOD_noise_new(dim, h, l, random)
_lib.TCOD_noise_set_type.restype=c_void
_lib.TCOD_noise_set_type.argtypes=[c_void_p , c_int ]
def noise_set_type(n, typ) :
_lib.TCOD_noise_set_type(n,typ)
_lib.TCOD_noise_get.restype=c_float
_lib.TCOD_noise_get.argtypes=[c_void_p , POINTER(c_float)]
_lib.TCOD_noise_get_ex.restype=c_float
_lib.TCOD_noise_get_ex.argtypes=[c_void_p , POINTER(c_float), c_int ]
def noise_get(n, f, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), typ)
_lib.TCOD_noise_get_fbm.restype=c_float
_lib.TCOD_noise_get_fbm.argtypes=[c_void_p , POINTER(c_float), c_float ]
_lib.TCOD_noise_get_fbm_ex.restype=c_float
_lib.TCOD_noise_get_fbm_ex.argtypes=[c_void_p , POINTER(c_float), c_float , c_int ]
def noise_get_fbm(n, f, oc, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_fbm_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), oc, typ)
_lib.TCOD_noise_get_turbulence.restype=c_float
_lib.TCOD_noise_get_turbulence.argtypes=[c_void_p , POINTER(c_float), c_float ]
_lib.TCOD_noise_get_turbulence_ex.restype=c_float
_lib.TCOD_noise_get_turbulence_ex.argtypes=[c_void_p , POINTER(c_float), c_float , c_int ]
def noise_get_turbulence(n, f, oc, typ=NOISE_DEFAULT):
return _lib.TCOD_noise_get_turbulence_ex(n, _NOISE_PACKER_FUNC[len(f)](*f), oc, typ)
_lib.TCOD_noise_delete.restype=c_void
_lib.TCOD_noise_delete.argtypes=[c_void_p ]
def noise_delete(n):
_lib.TCOD_noise_delete(n)
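# Example: sampling 2D simplex fBm noise (an illustrative sketch). The
# coordinate list is packed into a c_float array by _NOISE_PACKER_FUNC
# according to its length.
def _example_noise():
    n = noise_new(2)  # 2-dimensional generator
    v = noise_get_fbm(n, [0.5, 0.7], 4.0, NOISE_SIMPLEX)  # 4 octaves
    noise_delete(n)
    return v  # roughly within [-1.0, 1.0]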
############################
# fov module
############################
FOV_BASIC = 0
FOV_DIAMOND = 1
FOV_SHADOW = 2
FOV_PERMISSIVE_0 = 3
FOV_PERMISSIVE_1 = 4
FOV_PERMISSIVE_2 = 5
FOV_PERMISSIVE_3 = 6
FOV_PERMISSIVE_4 = 7
FOV_PERMISSIVE_5 = 8
FOV_PERMISSIVE_6 = 9
FOV_PERMISSIVE_7 = 10
FOV_PERMISSIVE_8 = 11
FOV_RESTRICTIVE = 12
NB_FOV_ALGORITHMS = 13
def FOV_PERMISSIVE(p) :
return FOV_PERMISSIVE_0+p
_lib.TCOD_map_new.restype=c_void_p
_lib.TCOD_map_new.argtypes=[c_int, c_int]
def map_new(w, h):
return _lib.TCOD_map_new(w, h)
_lib.TCOD_map_copy.restype=c_void
_lib.TCOD_map_copy.argtypes=[c_void_p , c_void_p ]
def map_copy(source, dest):
return _lib.TCOD_map_copy(source, dest)
_lib.TCOD_map_set_properties.restype=c_void
_lib.TCOD_map_set_properties.argtypes=[c_void_p , c_int, c_int, c_bool, c_bool]
def map_set_properties(m, x, y, isTrans, isWalk):
    _lib.TCOD_map_set_properties(m, x, y, c_bool(isTrans), c_bool(isWalk))
_lib.TCOD_map_clear.restype=c_void
_lib.TCOD_map_clear.argtypes=[c_void_p , c_bool , c_bool ]
def map_clear(m,walkable=False,transparent=False):
    # the C signature is TCOD_map_clear(map, transparent, walkable)
    _lib.TCOD_map_clear(m,c_bool(transparent),c_bool(walkable))
_lib.TCOD_map_compute_fov.restype=c_void
_lib.TCOD_map_compute_fov.argtypes=[c_void_p , c_int, c_int, c_int, c_bool, c_int ]
def map_compute_fov(m, x, y, radius=0, light_walls=True, algo=FOV_RESTRICTIVE ):
_lib.TCOD_map_compute_fov(m, x, y, c_int(radius), c_bool(light_walls), c_int(algo))
_lib.TCOD_map_set_in_fov.restype=c_void
_lib.TCOD_map_set_in_fov.argtypes=[c_void_p , c_int, c_int, c_bool ]
def map_set_in_fov(m, x, y, fov):
return _lib.TCOD_map_set_in_fov(m, x, y, fov)
_lib.TCOD_map_is_in_fov.restype=c_bool
_lib.TCOD_map_is_in_fov.argtypes=[c_void_p , c_int, c_int]
def map_is_in_fov(m, x, y):
return _lib.TCOD_map_is_in_fov(m, x, y)
_lib.TCOD_map_is_transparent.restype=c_bool
_lib.TCOD_map_is_transparent.argtypes=[c_void_p , c_int, c_int]
def map_is_transparent(m, x, y):
return _lib.TCOD_map_is_transparent(m, x, y)
_lib.TCOD_map_is_walkable.restype=c_bool
_lib.TCOD_map_is_walkable.argtypes=[c_void_p , c_int, c_int]
def map_is_walkable(m, x, y):
return _lib.TCOD_map_is_walkable(m, x, y)
_lib.TCOD_map_delete.restype=c_void
_lib.TCOD_map_delete.argtypes=[c_void_p ]
def map_delete(m):
return _lib.TCOD_map_delete(m)
_lib.TCOD_map_get_width.restype=c_int
_lib.TCOD_map_get_width.argtypes=[c_void_p ]
def map_get_width(map):
return _lib.TCOD_map_get_width(map)
_lib.TCOD_map_get_height.restype=c_int
_lib.TCOD_map_get_height.argtypes=[c_void_p ]
def map_get_height(map):
return _lib.TCOD_map_get_height(map)
_lib.TCOD_map_get_nb_cells.restype=c_int
_lib.TCOD_map_get_nb_cells.argtypes=[c_void_p ]
def map_get_nb_cells(map):
    return _lib.TCOD_map_get_nb_cells(map)
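# Example: computing field of view on a small map (an illustrative sketch).
def _example_fov():
    m = map_new(10, 10)
    map_clear(m, walkable=True, transparent=True)
    map_set_properties(m, 5, 5, False, False)  # one opaque, blocking cell
    map_compute_fov(m, 1, 1, radius=8, light_walls=True, algo=FOV_SHADOW)
    visible = map_is_in_fov(m, 5, 5)
    map_delete(m)
    return visible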
############################
# pathfinding module
############################
PATH_CBK_FUNC = CFUNCTYPE(c_float, c_int, c_int, c_int, c_int, py_object)
_lib.TCOD_path_new_using_map.restype=c_void_p
_lib.TCOD_path_new_using_map.argtypes=[c_void_p , c_float ]
def path_new_using_map(m, dcost=1.41):
return (_lib.TCOD_path_new_using_map(m, dcost), None)
_lib.TCOD_path_new_using_function.restype=c_void_p
_lib.TCOD_path_new_using_function.argtypes=[c_int, c_int, PATH_CBK_FUNC,
py_object, c_float]
def path_new_using_function(w, h, func, userdata=0, dcost=1.41):
cbk_func = PATH_CBK_FUNC(func)
return (_lib.TCOD_path_new_using_function(w, h, cbk_func,
userdata, dcost), cbk_func)
_lib.TCOD_path_compute.restype = c_bool
_lib.TCOD_path_compute.argtypes=[c_void_p , c_int,c_int, c_int, c_int]
def path_compute(p, ox, oy, dx, dy):
return _lib.TCOD_path_compute(p[0], ox, oy, dx, dy)
_lib.TCOD_path_get_origin.restype=c_void
_lib.TCOD_path_get_origin.argtypes=[c_void_p , POINTER(c_int), POINTER(c_int)]
def path_get_origin(p):
x = c_int()
y = c_int()
_lib.TCOD_path_get_origin(p[0], byref(x), byref(y))
return x.value, y.value
_lib.TCOD_path_get_destination.restype=c_void
_lib.TCOD_path_get_destination.argtypes=[c_void_p , POINTER(c_int), POINTER(c_int)]
def path_get_destination(p):
x = c_int()
y = c_int()
_lib.TCOD_path_get_destination(p[0], byref(x), byref(y))
return x.value, y.value
_lib.TCOD_path_size.restype=c_int
_lib.TCOD_path_size.argtypes=[c_void_p ]
def path_size(p):
return _lib.TCOD_path_size(p[0])
_lib.TCOD_path_reverse.restype=c_void
_lib.TCOD_path_reverse.argtypes=[c_void_p ]
def path_reverse(p):
_lib.TCOD_path_reverse(p[0])
_lib.TCOD_path_get.restype=c_void
_lib.TCOD_path_get.argtypes=[c_void_p , c_int, POINTER(c_int), POINTER(c_int)]
def path_get(p, idx):
x = c_int()
y = c_int()
_lib.TCOD_path_get(p[0], idx, byref(x), byref(y))
return x.value, y.value
_lib.TCOD_path_is_empty.restype = c_bool
_lib.TCOD_path_is_empty.argtypes=[c_void_p ]
def path_is_empty(p):
return _lib.TCOD_path_is_empty(p[0])
_lib.TCOD_path_walk.restype = c_bool
_lib.TCOD_path_walk.argtypes=[c_void_p , POINTER(c_int), POINTER(c_int), c_bool]
def path_walk(p, recompute):
x = c_int()
y = c_int()
    if _lib.TCOD_path_walk(p[0], byref(x), byref(y), c_bool(recompute)):
return x.value, y.value
return None,None
_lib.TCOD_path_delete.restype=c_void
_lib.TCOD_path_delete.argtypes=[c_void_p ]
def path_delete(p):
_lib.TCOD_path_delete(p[0])
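# Example: A* pathfinding over a map handle (an illustrative sketch). The
# wrappers keep a (handle, callback) tuple so a custom cost callback cannot
# be garbage-collected while the path object is alive.
def _example_path(m):
    p = path_new_using_map(m, 1.41)
    steps = []
    if path_compute(p, 0, 0, 9, 9):
        while not path_is_empty(p):
            steps.append(path_walk(p, True))  # next cell, recomputing if blocked
    path_delete(p)
    return steps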
_lib.TCOD_dijkstra_new.restype=c_void_p
_lib.TCOD_dijkstra_new.argtypes=[c_void_p , c_float ]
def dijkstra_new(m, dcost=1.41):
return (_lib.TCOD_dijkstra_new(c_void_p(m), c_float(dcost)), None)
_lib.TCOD_dijkstra_new_using_function.restype=c_void_p
_lib.TCOD_dijkstra_new_using_function.argtypes=[c_int, c_int, PATH_CBK_FUNC,
py_object, c_float]
def dijkstra_new_using_function(w, h, func, userdata=0, dcost=1.41):
cbk_func = PATH_CBK_FUNC(func)
return (_lib.TCOD_dijkstra_new_using_function(w, h, cbk_func,
userdata, dcost), cbk_func)
_lib.TCOD_dijkstra_compute.restype=c_void
_lib.TCOD_dijkstra_compute.argtypes=[c_void_p , c_int, c_int]
def dijkstra_compute(p, ox, oy):
_lib.TCOD_dijkstra_compute(p[0], c_int(ox), c_int(oy))
_lib.TCOD_dijkstra_path_set.restype = c_bool
_lib.TCOD_dijkstra_path_set.argtypes=[c_void_p , c_int, c_int]
def dijkstra_path_set(p, x, y):
return _lib.TCOD_dijkstra_path_set(p[0], c_int(x), c_int(y))
_lib.TCOD_dijkstra_get_distance.restype = c_float
_lib.TCOD_dijkstra_get_distance.argtypes=[c_void_p , c_int, c_int]
def dijkstra_get_distance(p, x, y):
return _lib.TCOD_dijkstra_get_distance(p[0], c_int(x), c_int(y))
_lib.TCOD_dijkstra_size.restype=c_int
_lib.TCOD_dijkstra_size.argtypes=[c_void_p ]
def dijkstra_size(p):
return _lib.TCOD_dijkstra_size(p[0])
_lib.TCOD_dijkstra_reverse.restype=c_void
_lib.TCOD_dijkstra_reverse.argtypes=[c_void_p ]
def dijkstra_reverse(p):
_lib.TCOD_dijkstra_reverse(p[0])
_lib.TCOD_dijkstra_get.restype=c_void
_lib.TCOD_dijkstra_get.argtypes=[c_void_p , c_int, POINTER(c_int), POINTER(c_int)]
def dijkstra_get(p, idx):
x = c_int()
y = c_int()
_lib.TCOD_dijkstra_get(p[0], c_int(idx), byref(x), byref(y))
return x.value, y.value
_lib.TCOD_dijkstra_is_empty.restype = c_bool
_lib.TCOD_dijkstra_is_empty.argtypes=[c_void_p ]
def dijkstra_is_empty(p):
return _lib.TCOD_dijkstra_is_empty(p[0])
_lib.TCOD_dijkstra_path_walk.restype = c_bool
_lib.TCOD_dijkstra_path_walk.argtypes=[c_void_p , POINTER(c_int), POINTER(c_int)]
def dijkstra_path_walk(p):
x = c_int()
y = c_int()
if _lib.TCOD_dijkstra_path_walk(p[0], byref(x), byref(y)):
return x.value, y.value
return None,None
_lib.TCOD_dijkstra_delete.restype=c_void
_lib.TCOD_dijkstra_delete.argtypes=[c_void_p ]
def dijkstra_delete(p):
_lib.TCOD_dijkstra_delete(p[0])
############################
# bsp module
############################
class _CBsp(Structure):
_fields_ = [('next', c_void_p),
('father', c_void_p),
('son', c_void_p),
('x', c_int),
('y', c_int),
('w', c_int),
('h', c_int),
('position', c_int),
('level', c_uint8),
('horizontal', c_bool),
]
BSP_CBK_FUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
# Python class encapsulating the _CBsp pointer
class Bsp(object):
def __init__(self, cnode):
pcbsp = cast(cnode, POINTER(_CBsp))
self.p = pcbsp
def getx(self):
return self.p.contents.x
def setx(self, value):
self.p.contents.x = value
x = property(getx, setx)
def gety(self):
return self.p.contents.y
def sety(self, value):
self.p.contents.y = value
y = property(gety, sety)
def getw(self):
return self.p.contents.w
def setw(self, value):
self.p.contents.w = value
w = property(getw, setw)
def geth(self):
return self.p.contents.h
def seth(self, value):
self.p.contents.h = value
h = property(geth, seth)
def getpos(self):
return self.p.contents.position
def setpos(self, value):
self.p.contents.position = value
position = property(getpos, setpos)
def gethor(self):
return self.p.contents.horizontal
def sethor(self,value):
self.p.contents.horizontal = value
horizontal = property(gethor, sethor)
def getlev(self):
return self.p.contents.level
def setlev(self,value):
self.p.contents.level = value
level = property(getlev, setlev)
_lib.TCOD_bsp_new.restype=c_void_p
_lib.TCOD_bsp_new.argtypes=[c_int, c_int, c_int, c_int]
def bsp_new(x, y, w, h):
return _lib.TCOD_bsp_new(x, y, w, h)
_lib.TCOD_bsp_new_with_size.restype = POINTER(_CBsp)
_lib.TCOD_bsp_new_with_size.argtypes=[c_int,c_int,c_int, c_int]
def bsp_new_with_size(x, y, w, h):
return Bsp(_lib.TCOD_bsp_new_with_size(x, y, w, h))
_lib.TCOD_bsp_split_once.restype=c_void
_lib.TCOD_bsp_split_once.argtypes=[c_void_p, c_bool , c_int]
def bsp_split_once(node, horizontal, position):
    _lib.TCOD_bsp_split_once(node.p, c_bool(horizontal), position)
_lib.TCOD_bsp_split_recursive.restype=c_void
_lib.TCOD_bsp_split_recursive.argtypes=[c_void_p, c_void_p, c_int, c_int, c_int, c_float, c_float]
def bsp_split_recursive(node, randomizer, nb, minHSize, minVSize, maxHRatio,
maxVRatio):
_lib.TCOD_bsp_split_recursive(node.p, randomizer, nb, minHSize, minVSize,
c_float(maxHRatio), c_float(maxVRatio))
_lib.TCOD_bsp_resize.restype=c_void
_lib.TCOD_bsp_resize.argtypes=[c_void_p, c_int,c_int, c_int, c_int]
def bsp_resize(node, x, y, w, h):
_lib.TCOD_bsp_resize(node.p, x, y, w, h)
_lib.TCOD_bsp_left.restype = POINTER(_CBsp)
_lib.TCOD_bsp_left.argtypes=[c_void_p]
def bsp_left(node):
return Bsp(_lib.TCOD_bsp_left(node.p))
_lib.TCOD_bsp_right.restype = POINTER(_CBsp)
_lib.TCOD_bsp_right.argtypes=[c_void_p]
def bsp_right(node):
return Bsp(_lib.TCOD_bsp_right(node.p))
_lib.TCOD_bsp_father.restype = POINTER(_CBsp)
_lib.TCOD_bsp_father.argtypes=[c_void_p]
def bsp_father(node):
return Bsp(_lib.TCOD_bsp_father(node.p))
_lib.TCOD_bsp_is_leaf.restype = c_bool
_lib.TCOD_bsp_is_leaf.argtypes=[c_void_p]
def bsp_is_leaf(node):
return _lib.TCOD_bsp_is_leaf(node.p)
_lib.TCOD_bsp_contains.restype = c_bool
_lib.TCOD_bsp_contains.argtypes=[c_void_p, c_int, c_int]
def bsp_contains(node, cx, cy):
return _lib.TCOD_bsp_contains(node.p, cx, cy)
_lib.TCOD_bsp_find_node.restype = POINTER(_CBsp)
_lib.TCOD_bsp_find_node.argtypes=[c_void_p, c_int, c_int]
def bsp_find_node(node, cx, cy):
return Bsp(_lib.TCOD_bsp_find_node(node.p, cx, cy))
def _bsp_traverse(node, callback, userData, func):
# convert the c node into a Python node
#before passing it to the actual callback
def node_converter(cnode, data):
node = Bsp(cnode)
return callback(node, data)
cbk_func = BSP_CBK_FUNC(node_converter)
func(node.p, cbk_func, userData)
def bsp_traverse_pre_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_pre_order)
def bsp_traverse_in_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_in_order)
def bsp_traverse_post_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_post_order)
def bsp_traverse_level_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_level_order)
def bsp_traverse_inverted_level_order(node, callback, userData=0):
_bsp_traverse(node, callback, userData,
_lib.TCOD_bsp_traverse_inverted_level_order)
_lib.TCOD_bsp_remove_sons.restype=c_void
_lib.TCOD_bsp_remove_sons.argtypes=[c_void_p]
def bsp_remove_sons(node):
_lib.TCOD_bsp_remove_sons(node.p)
_lib.TCOD_bsp_delete.restype=c_void
_lib.TCOD_bsp_delete.argtypes=[c_void_p]
def bsp_delete(node):
_lib.TCOD_bsp_delete(node.p)
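# Example: recursively splitting a BSP tree and visiting its leaves (an
# illustrative sketch). A randomizer of 0 selects libtcod's default RNG.
def _example_bsp():
    root = bsp_new_with_size(0, 0, 80, 50)
    bsp_split_recursive(root, 0, nb=4, minHSize=5, minVSize=5,
                        maxHRatio=1.5, maxVRatio=1.5)
    def visit(node, userdata):
        if bsp_is_leaf(node):
            pass  # carve a room inside node.x, node.y, node.w, node.h
        return True  # continue the traversal
    bsp_traverse_pre_order(root, visit)
    bsp_delete(root)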
############################
# heightmap module
############################
class _CHeightMap(Structure):
_fields_=[('w', c_int),
('h', c_int),
('values', POINTER(c_float)),
]
class HeightMap(object):
def __init__(self, chm):
pchm = cast(chm, POINTER(_CHeightMap))
self.p = pchm
def getw(self):
return self.p.contents.w
def setw(self, value):
self.p.contents.w = value
w = property(getw, setw)
def geth(self):
return self.p.contents.h
def seth(self, value):
self.p.contents.h = value
h = property(geth, seth)
_lib.TCOD_heightmap_new.restype = POINTER(_CHeightMap)
_lib.TCOD_heightmap_new.argtypes=[c_int,c_int]
def heightmap_new(w, h):
phm = _lib.TCOD_heightmap_new(w, h)
return HeightMap(phm)
_lib.TCOD_heightmap_set_value.restype=c_void
_lib.TCOD_heightmap_set_value.argtypes=[c_void_p, c_int, c_int, c_float ]
def heightmap_set_value(hm, x, y, value):
_lib.TCOD_heightmap_set_value(hm.p, x, y, c_float(value))
_lib.TCOD_heightmap_add.restype=c_void
_lib.TCOD_heightmap_add.argtypes=[c_void_p, c_float ]
def heightmap_add(hm, value):
_lib.TCOD_heightmap_add(hm.p, c_float(value))
_lib.TCOD_heightmap_scale.restype=c_void
_lib.TCOD_heightmap_scale.argtypes=[c_void_p, c_float ]
def heightmap_scale(hm, value):
_lib.TCOD_heightmap_scale(hm.p, c_float(value))
_lib.TCOD_heightmap_clear.restype=c_void
_lib.TCOD_heightmap_clear.argtypes=[c_void_p]
def heightmap_clear(hm):
_lib.TCOD_heightmap_clear(hm.p)
_lib.TCOD_heightmap_clamp.restype=c_void
_lib.TCOD_heightmap_clamp.argtypes=[c_void_p, c_float , c_float ]
def heightmap_clamp(hm, mi, ma):
_lib.TCOD_heightmap_clamp(hm.p, c_float(mi),c_float(ma))
_lib.TCOD_heightmap_copy.restype=c_void
_lib.TCOD_heightmap_copy.argtypes=[c_void_p,c_void_p]
def heightmap_copy(hm1, hm2):
_lib.TCOD_heightmap_copy(hm1.p, hm2.p)
_lib.TCOD_heightmap_normalize.restype=c_void
_lib.TCOD_heightmap_normalize.argtypes=[c_void_p, c_float , c_float ]
def heightmap_normalize(hm, mi=0.0, ma=1.0):
_lib.TCOD_heightmap_normalize(hm.p, c_float(mi), c_float(ma))
_lib.TCOD_heightmap_lerp_hm.restype=c_void
_lib.TCOD_heightmap_lerp_hm.argtypes=[c_void_p, c_void_p, c_void_p, c_float ]
def heightmap_lerp_hm(hm1, hm2, hm3, coef):
_lib.TCOD_heightmap_lerp_hm(hm1.p, hm2.p, hm3.p, c_float(coef))
_lib.TCOD_heightmap_add_hm.restype=c_void
_lib.TCOD_heightmap_add_hm.argtypes=[c_void_p, c_void_p, c_void_p]
def heightmap_add_hm(hm1, hm2, hm3):
_lib.TCOD_heightmap_add_hm(hm1.p, hm2.p, hm3.p)
_lib.TCOD_heightmap_multiply_hm.restype=c_void
_lib.TCOD_heightmap_multiply_hm.argtypes=[c_void_p, c_void_p, c_void_p]
def heightmap_multiply_hm(hm1, hm2, hm3):
_lib.TCOD_heightmap_multiply_hm(hm1.p, hm2.p, hm3.p)
_lib.TCOD_heightmap_add_hill.restype=c_void
_lib.TCOD_heightmap_add_hill.argtypes=[c_void_p, c_float , c_float , c_float , c_float ]
def heightmap_add_hill(hm, x, y, radius, height):
_lib.TCOD_heightmap_add_hill(hm.p, c_float( x), c_float( y),
c_float( radius), c_float( height))
_lib.TCOD_heightmap_dig_hill.restype=c_void
_lib.TCOD_heightmap_dig_hill.argtypes=[c_void_p, c_float , c_float , c_float , c_float ]
def heightmap_dig_hill(hm, x, y, radius, height):
_lib.TCOD_heightmap_dig_hill(hm.p, c_float( x), c_float( y),
c_float( radius), c_float( height))
_lib.TCOD_heightmap_mid_point_displacement.restype = c_void
_lib.TCOD_heightmap_mid_point_displacement.argtypes = [c_void_p, c_void_p,
c_float]
def heightmap_mid_point_displacement(hm, rng, roughness):
_lib.TCOD_heightmap_mid_point_displacement(hm.p, rng, roughness)
_lib.TCOD_heightmap_rain_erosion.restype=c_void
_lib.TCOD_heightmap_rain_erosion.argtypes=[c_void_p, c_int,c_float ,c_float ,c_void_p ]
def heightmap_rain_erosion(hm, nbDrops, erosionCoef, sedimentationCoef, rnd=0):
_lib.TCOD_heightmap_rain_erosion(hm.p, nbDrops, c_float( erosionCoef),
c_float( sedimentationCoef), rnd)
_lib.TCOD_heightmap_kernel_transform.restype=c_void
_lib.TCOD_heightmap_kernel_transform.argtypes=[c_void_p, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_float), c_float ,c_float ]
def heightmap_kernel_transform(hm, kernelsize, dx, dy, weight, minLevel,
maxLevel):
FARRAY = c_float * kernelsize
IARRAY = c_int * kernelsize
cdx = IARRAY(*dx)
cdy = IARRAY(*dy)
cweight = FARRAY(*weight)
_lib.TCOD_heightmap_kernel_transform(hm.p, kernelsize, cdx, cdy, cweight,
c_float(minLevel), c_float(maxLevel))
_lib.TCOD_heightmap_add_voronoi.restype=c_void
_lib.TCOD_heightmap_add_voronoi.argtypes=[c_void_p, c_int, c_int, POINTER(c_float),c_void_p ]
def heightmap_add_voronoi(hm, nbPoints, nbCoef, coef, rnd=0):
FARRAY = c_float * nbCoef
ccoef = FARRAY(*coef)
_lib.TCOD_heightmap_add_voronoi(hm.p, nbPoints, nbCoef, ccoef, rnd)
_lib.TCOD_heightmap_add_fbm.restype=c_void
_lib.TCOD_heightmap_add_fbm.argtypes=[c_void_p, c_void_p, c_float, c_float, c_float, c_float, c_float, c_float, c_float]
def heightmap_add_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta, scale):
_lib.TCOD_heightmap_add_fbm(hm.p, noise, c_float(mulx), c_float(muly),
c_float(addx), c_float(addy),
c_float(octaves), c_float(delta),
c_float(scale))
_lib.TCOD_heightmap_scale_fbm.restype=c_void
_lib.TCOD_heightmap_scale_fbm.argtypes=[c_void_p, c_void_p, c_float, c_float, c_float, c_float, c_float, c_float, c_float]
def heightmap_scale_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta,
scale):
_lib.TCOD_heightmap_scale_fbm(hm.p, noise, c_float(mulx), c_float(muly),
c_float(addx), c_float(addy),
c_float(octaves), c_float(delta),
c_float(scale))
_lib.TCOD_heightmap_islandify.restype=c_void
_lib.TCOD_heightmap_islandify.argtypes=[c_void_p, c_float ,c_void_p ]
def heightmap_islandify(hm, sealevel, rnd):
    _lib.TCOD_heightmap_islandify(hm.p, c_float(sealevel), rnd)
_lib.TCOD_heightmap_dig_bezier.restype=c_void
_lib.TCOD_heightmap_dig_bezier.argtypes=[c_void_p, POINTER(c_int), POINTER(c_int), c_float , c_float , c_float , c_float ]
def heightmap_dig_bezier(hm, px, py, startRadius, startDepth, endRadius,
endDepth):
IARRAY = c_int * 4
cpx = IARRAY(*px)
cpy = IARRAY(*py)
_lib.TCOD_heightmap_dig_bezier(hm.p, cpx, cpy, c_float(startRadius),
c_float(startDepth), c_float(endRadius),
c_float(endDepth))
_lib.TCOD_heightmap_get_value.restype = c_float
_lib.TCOD_heightmap_get_value.argtypes=[c_void_p, c_int, c_int]
def heightmap_get_value(hm, x, y):
return _lib.TCOD_heightmap_get_value(hm.p, x, y)
_lib.TCOD_heightmap_get_interpolated_value.restype=c_float
_lib.TCOD_heightmap_get_interpolated_value.argtypes=[c_void_p, c_float , c_float ]
def heightmap_get_interpolated_value(hm, x, y):
return _lib.TCOD_heightmap_get_interpolated_value(hm.p, c_float(x),
c_float(y))
_lib.TCOD_heightmap_get_slope.restype=c_float
_lib.TCOD_heightmap_get_slope.argtypes=[c_void_p, c_int, c_int]
def heightmap_get_slope(hm, x, y):
return _lib.TCOD_heightmap_get_slope(hm.p, x, y)
_lib.TCOD_heightmap_get_normal.restype=c_void
_lib.TCOD_heightmap_get_normal.argtypes=[c_void_p, c_float , c_float , POINTER(c_float), c_float ]
def heightmap_get_normal(hm, x, y, waterLevel):
FARRAY = c_float * 3
cn = FARRAY()
_lib.TCOD_heightmap_get_normal(hm.p, c_float(x), c_float(y), cn, c_float(waterLevel))
return cn[0], cn[1], cn[2]
_lib.TCOD_heightmap_count_cells.restype=c_int
_lib.TCOD_heightmap_count_cells.argtypes=[c_void_p, c_float , c_float ]
def heightmap_count_cells(hm, mi, ma):
return _lib.TCOD_heightmap_count_cells(hm.p, c_float(mi), c_float(ma))
_lib.TCOD_heightmap_has_land_on_border.restype = c_bool
_lib.TCOD_heightmap_has_land_on_border.argtypes=[c_void_p, c_float ]
def heightmap_has_land_on_border(hm, waterlevel):
return _lib.TCOD_heightmap_has_land_on_border(hm.p, c_float(waterlevel))
_lib.TCOD_heightmap_get_minmax.restype=c_void
_lib.TCOD_heightmap_get_minmax.argtypes=[c_void_p, POINTER(c_float), POINTER(c_float)]
def heightmap_get_minmax(hm):
mi = c_float()
ma = c_float()
_lib.TCOD_heightmap_get_minmax(hm.p, byref(mi), byref(ma))
return mi.value, ma.value
_lib.TCOD_heightmap_delete.restype=c_void
_lib.TCOD_heightmap_delete.argtypes=[c_void_p]
def heightmap_delete(hm):
_lib.TCOD_heightmap_delete(hm.p)
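# Example: building and normalizing a simple heightmap (an illustrative
# sketch).
def _example_heightmap():
    hm = heightmap_new(64, 64)
    heightmap_add_hill(hm, 32.0, 32.0, 20.0, 1.0)  # one central hill
    noise = noise_new(2)
    heightmap_add_fbm(hm, noise, 4.0, 4.0, 0.0, 0.0, 6.0, 0.5, 1.0)
    heightmap_normalize(hm)  # rescale all values into [0.0, 1.0]
    mi, ma = heightmap_get_minmax(hm)
    noise_delete(noise)
    heightmap_delete(hm)
    return mi, ma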
############################
# name generator module
############################
_lib.TCOD_namegen_parse.restype=c_void
_lib.TCOD_namegen_parse.argtypes=[c_char_p , c_void_p ]
def namegen_parse(filename,rnd=0) :
_lib.TCOD_namegen_parse(convert_to_ascii(filename),rnd)
_lib.TCOD_namegen_generate.restype=c_char_p
_lib.TCOD_namegen_generate.argtypes=[c_char_p , c_bool ]
def namegen_generate(name) :
ret = _lib.TCOD_namegen_generate(convert_to_ascii(name), 0)
if is_python_3:
return ret.decode("utf-8")
return ret
_lib.TCOD_namegen_generate_custom.restype=c_char_p
_lib.TCOD_namegen_generate_custom.argtypes=[c_char_p , c_char_p , c_bool ]
def namegen_generate_custom(name, rule) :
    ret = _lib.TCOD_namegen_generate_custom(convert_to_ascii(name), convert_to_ascii(rule), 0)
if is_python_3:
return ret.decode("utf-8")
return ret
_lib.TCOD_namegen_get_sets.restype=c_void_p
_lib.TCOD_namegen_get_sets.argtypes=[]
def namegen_get_sets():
nb=_lib.TCOD_namegen_get_nb_sets_wrapper()
    SARRAY = c_char_p * nb
setsa = SARRAY()
_lib.TCOD_namegen_get_sets_wrapper(setsa)
if is_python_3:
return list(v.decode("utf-8") for v in setsa)
return list(setsa)
_lib.TCOD_namegen_destroy.restype=c_void
_lib.TCOD_namegen_destroy.argtypes=[]
def namegen_destroy() :
_lib.TCOD_namegen_destroy()
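# Example: generating a name from a syllable-set file (an illustrative
# sketch; the file and set name are hypothetical and depend on the .cfg
# contents).
def _example_namegen():
    namegen_parse('names.cfg')       # hypothetical data file
    name = namegen_generate('male')  # hypothetical set name
    namegen_destroy()
    return name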
_lib.TCOD_lex_new_intern.restype=c_void_p
_lib.TCOD_lex_new_intern.argtypes=[]
_lib.TCOD_lex_new.restype=c_void_p
_lib.TCOD_lex_new.argtypes=[POINTER(c_char_p), POINTER(c_char_p), c_char_p, ]
_lib.TCOD_lex_delete.restype=c_void
_lib.TCOD_lex_delete.argtypes=[c_void_p]
_lib.TCOD_lex_set_data_buffer.restype=c_void
_lib.TCOD_lex_set_data_buffer.argtypes=[c_void_p,c_char_p]
_lib.TCOD_lex_set_data_file.restype=c_bool
_lib.TCOD_lex_set_data_file.argtypes=[c_void_p,c_char_p]
_lib.TCOD_lex_parse.restype=c_int
_lib.TCOD_lex_parse.argtypes=[c_void_p]
_lib.TCOD_lex_parse_until_token_type.restype=c_int
_lib.TCOD_lex_parse_until_token_type.argtypes=[c_void_p,c_int]
_lib.TCOD_lex_parse_until_token_value.restype=c_int
_lib.TCOD_lex_parse_until_token_value.argtypes=[c_void_p,c_char_p]
_lib.TCOD_lex_expect_token_type.restype=c_bool
_lib.TCOD_lex_expect_token_type.argtypes=[c_void_p,c_int]
_lib.TCOD_lex_expect_token_value.restype=c_bool
_lib.TCOD_lex_expect_token_value.argtypes=[c_void_p,c_int,c_char_p]
_lib.TCOD_lex_savepoint.restype=c_void
_lib.TCOD_lex_savepoint.argtypes=[c_void_p,c_void_p]
_lib.TCOD_lex_restore.restype=c_void
_lib.TCOD_lex_restore.argtypes=[c_void_p,c_void_p]
_lib.TCOD_lex_get_last_javadoc.restype=c_char_p
_lib.TCOD_lex_get_last_javadoc.argtypes=[c_void_p]
_lib.TCOD_lex_get_token_name.restype=c_char_p
_lib.TCOD_lex_get_token_name.argtypes=[c_int]
_lib.TCOD_lex_get_last_error.restype=c_char_p
_lib.TCOD_lex_get_last_error.argtypes=[]
_lib.TCOD_lex_hextoint.restype=c_int
_lib.TCOD_lex_hextoint.argtypes=[c_char]
_lib.TCOD_sys_get_surface.restype=c_void_p
_lib.TCOD_sys_get_surface.argtypes=[c_int, c_int, c_bool ]
_lib.TCOD_sys_load_image.restype=c_void_p
_lib.TCOD_sys_load_image.argtypes=[c_char_p]
_lib.TCOD_list_new.restype=c_void_p
_lib.TCOD_list_new.argtypes=[]
_lib.TCOD_list_allocate.restype=c_void_p
_lib.TCOD_list_allocate.argtypes=[c_int]
_lib.TCOD_list_duplicate.restype=c_void_p
_lib.TCOD_list_duplicate.argtypes=[c_void_p ]
_lib.TCOD_list_delete.restype=c_void
_lib.TCOD_list_delete.argtypes=[c_void_p ]
_lib.TCOD_list_push.restype=c_void
_lib.TCOD_list_push.argtypes=[c_void_p ,c_void_p ]
_lib.TCOD_list_pop.restype=c_void_p
_lib.TCOD_list_pop.argtypes=[c_void_p ]
_lib.TCOD_list_peek.restype=c_void_p
_lib.TCOD_list_peek.argtypes=[c_void_p ]
_lib.TCOD_list_add_all.restype=c_void
_lib.TCOD_list_add_all.argtypes=[c_void_p , c_void_p ]
_lib.TCOD_list_get.restype=c_void_p
_lib.TCOD_list_get.argtypes=[c_void_p ,c_int]
_lib.TCOD_list_set.restype=c_void
_lib.TCOD_list_set.argtypes=[c_void_p ,c_void_p, c_int]
_lib.TCOD_list_begin.restype=POINTER(c_void_p)
_lib.TCOD_list_begin.argtypes=[c_void_p ]
_lib.TCOD_list_end.restype=POINTER(c_void_p)
_lib.TCOD_list_end.argtypes=[c_void_p ]
_lib.TCOD_list_reverse.restype=c_void
_lib.TCOD_list_reverse.argtypes=[c_void_p ]
_lib.TCOD_list_remove_iterator.restype=POINTER(c_void_p)
_lib.TCOD_list_remove_iterator.argtypes=[c_void_p , POINTER(c_void_p)]
_lib.TCOD_list_remove.restype=c_void
_lib.TCOD_list_remove.argtypes=[c_void_p ,c_void_p ]
_lib.TCOD_list_remove_iterator_fast.restype=POINTER(c_void_p)
_lib.TCOD_list_remove_iterator_fast.argtypes=[c_void_p , POINTER(c_void_p)]
_lib.TCOD_list_remove_fast.restype=c_void
_lib.TCOD_list_remove_fast.argtypes=[c_void_p ,c_void_p ]
_lib.TCOD_list_contains.restype=c_bool
_lib.TCOD_list_contains.argtypes=[c_void_p ,c_void_p ]
_lib.TCOD_list_clear.restype=c_void
_lib.TCOD_list_clear.argtypes=[c_void_p ]
_lib.TCOD_list_clear_and_delete.restype=c_void
_lib.TCOD_list_clear_and_delete.argtypes=[c_void_p ]
_lib.TCOD_list_size.restype=c_int
_lib.TCOD_list_size.argtypes=[c_void_p ]
_lib.TCOD_list_insert_before.restype=POINTER(c_void_p)
_lib.TCOD_list_insert_before.argtypes=[c_void_p ,c_void_p,c_int]
_lib.TCOD_list_is_empty.restype=c_bool
_lib.TCOD_list_is_empty.argtypes=[c_void_p ]
_lib.TCOD_sys_create_directory.restype=c_bool
_lib.TCOD_sys_create_directory.argtypes=[c_char_p]
_lib.TCOD_sys_delete_file.restype=c_bool
_lib.TCOD_sys_delete_file.argtypes=[c_char_p]
_lib.TCOD_sys_delete_directory.restype=c_bool
_lib.TCOD_sys_delete_directory.argtypes=[c_char_p]
_lib.TCOD_sys_is_directory.restype=c_bool
_lib.TCOD_sys_is_directory.argtypes=[c_char_p]
_lib.TCOD_sys_get_directory_content.restype=c_void_p
_lib.TCOD_sys_get_directory_content.argtypes=[c_char_p, c_char_p]
_lib.TCOD_sys_file_exists.restype=c_bool
# argtypes deliberately left unset: the C function takes a printf-style
# variadic filename, which a fixed argtypes list cannot express.
# _lib.TCOD_sys_file_exists.argtypes=[c_char_p , ...]
_lib.TCOD_sys_get_num_cores.restype=c_int
_lib.TCOD_sys_get_num_cores.argtypes=[]
_lib.TCOD_thread_wait.restype=c_void
_lib.TCOD_thread_wait.argtypes=[c_void_p ]
_lib.TCOD_mutex_new.restype=c_void_p
_lib.TCOD_mutex_new.argtypes=[]
_lib.TCOD_mutex_in.restype=c_void
_lib.TCOD_mutex_in.argtypes=[c_void_p ]
_lib.TCOD_mutex_out.restype=c_void
_lib.TCOD_mutex_out.argtypes=[c_void_p ]
_lib.TCOD_mutex_delete.restype=c_void
_lib.TCOD_mutex_delete.argtypes=[c_void_p ]
_lib.TCOD_semaphore_new.restype=c_void_p
_lib.TCOD_semaphore_new.argtypes=[c_int]
_lib.TCOD_semaphore_lock.restype=c_void
_lib.TCOD_semaphore_lock.argtypes=[c_void_p ]
_lib.TCOD_semaphore_unlock.restype=c_void
_lib.TCOD_semaphore_unlock.argtypes=[c_void_p ]
_lib.TCOD_semaphore_delete.restype=c_void
_lib.TCOD_semaphore_delete.argtypes=[ c_void_p ]
_lib.TCOD_condition_new.restype=c_void_p
_lib.TCOD_condition_new.argtypes=[]
_lib.TCOD_condition_signal.restype=c_void
_lib.TCOD_condition_signal.argtypes=[c_void_p ]
_lib.TCOD_condition_broadcast.restype=c_void
_lib.TCOD_condition_broadcast.argtypes=[c_void_p ]
_lib.TCOD_condition_wait.restype=c_void
_lib.TCOD_condition_wait.argtypes=[c_void_p , c_void_p ]
_lib.TCOD_condition_delete.restype=c_void
_lib.TCOD_condition_delete.argtypes=[ c_void_p ]
_lib.TCOD_tree_new.restype=c_void_p
_lib.TCOD_tree_new.argtypes=[]
_lib.TCOD_tree_add_son.restype=c_void
_lib.TCOD_tree_add_son.argtypes=[c_void_p, c_void_p]
_lib.TCOD_text_init.restype=c_void_p
_lib.TCOD_text_init.argtypes=[c_int, c_int, c_int, c_int, c_int]
_lib.TCOD_text_set_properties.restype=c_void
_lib.TCOD_text_set_properties.argtypes=[c_void_p , c_int, c_int, c_char_p , c_int]
_lib.TCOD_text_set_colors.restype=c_void
_lib.TCOD_text_set_colors.argtypes=[c_void_p , c_int , c_int , c_float]
_lib.TCOD_text_update.restype=c_bool
_lib.TCOD_text_update.argtypes=[c_void_p , c_int ]
_lib.TCOD_text_render.restype=c_void
_lib.TCOD_text_render.argtypes=[c_void_p , c_void_p ]
_lib.TCOD_text_get.restype=c_char_p
_lib.TCOD_text_get.argtypes=[c_void_p ]
_lib.TCOD_text_reset.restype=c_void
_lib.TCOD_text_reset.argtypes=[c_void_p ]
_lib.TCOD_text_delete.restype=c_void
_lib.TCOD_text_delete.argtypes=[c_void_p ]
_lib.TCOD_zip_new.restype=c_void_p
_lib.TCOD_zip_new.argtypes=[]
_lib.TCOD_zip_delete.restype=c_void
_lib.TCOD_zip_delete.argtypes=[c_void_p ]
_lib.TCOD_zip_put_char.restype=c_void
_lib.TCOD_zip_put_char.argtypes=[c_void_p , c_char ]
_lib.TCOD_zip_put_int.restype=c_void
_lib.TCOD_zip_put_int.argtypes=[c_void_p , c_int]
_lib.TCOD_zip_put_float.restype=c_void
_lib.TCOD_zip_put_float.argtypes=[c_void_p , c_float ]
_lib.TCOD_zip_put_string.restype=c_void
_lib.TCOD_zip_put_string.argtypes=[c_void_p , c_char_p]
_lib.TCOD_zip_put_color.restype=c_void
_lib.TCOD_zip_put_color.argtypes=[c_void_p , c_int ]
_lib.TCOD_zip_put_image.restype=c_void
_lib.TCOD_zip_put_image.argtypes=[c_void_p , c_void_p ]
_lib.TCOD_zip_put_console.restype=c_void
_lib.TCOD_zip_put_console.argtypes=[c_void_p , c_void_p ]
_lib.TCOD_zip_put_data.restype=c_void
_lib.TCOD_zip_put_data.argtypes=[c_void_p , c_int,c_void_p]
_lib.TCOD_zip_get_current_bytes.restype=c_int
_lib.TCOD_zip_get_current_bytes.argtypes=[c_void_p ]
_lib.TCOD_zip_save_to_file.restype=c_int
_lib.TCOD_zip_save_to_file.argtypes=[c_void_p , c_char_p]
_lib.TCOD_zip_load_from_file.restype=c_int
_lib.TCOD_zip_load_from_file.argtypes=[c_void_p , c_char_p]
_lib.TCOD_zip_get_char.restype=c_char
_lib.TCOD_zip_get_char.argtypes=[c_void_p ]
_lib.TCOD_zip_get_int.restype=c_int
_lib.TCOD_zip_get_int.argtypes=[c_void_p ]
_lib.TCOD_zip_get_float.restype=c_float
_lib.TCOD_zip_get_float.argtypes=[c_void_p ]
_lib.TCOD_zip_get_string.restype=c_char_p
_lib.TCOD_zip_get_string.argtypes=[c_void_p ]
_lib.TCOD_zip_get_color.restype=c_int
_lib.TCOD_zip_get_color.argtypes=[c_void_p ]
_lib.TCOD_zip_get_image.restype=c_void_p
_lib.TCOD_zip_get_image.argtypes=[c_void_p ]
_lib.TCOD_zip_get_console.restype=c_void_p
_lib.TCOD_zip_get_console.argtypes=[c_void_p ]
_lib.TCOD_zip_get_data.restype=c_int
_lib.TCOD_zip_get_data.argtypes=[c_void_p , c_int,c_void_p]
_lib.TCOD_zip_get_remaining_bytes.restype=c_int
_lib.TCOD_zip_get_remaining_bytes.argtypes=[c_void_p ]
_lib.TCOD_zip_skip_bytes.restype=c_void
_lib.TCOD_zip_skip_bytes.argtypes=[c_void_p ,c_int ]
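# Illustrative round trip with the zip (de)serializer declared above (a
# sketch only; 'save.dat' is a hypothetical path):
#   z = _lib.TCOD_zip_new()
#   _lib.TCOD_zip_put_int(z, 42)
#   _lib.TCOD_zip_save_to_file(z, convert_to_ascii('save.dat'))
#   _lib.TCOD_zip_delete(z)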
|
bopace/artifice
|
src/libtcodpy/__init__.py
|
Python
|
mit
| 100,027
|
[
"Amber"
] |
36f2bb9a30a89df050cd26b530b589c8622fafa37ea8d925f6ab9d57d61efa24
|
## INFO ########################################################################
## ##
## formbuilder ##
## =========== ##
## ##
## Online Form Building Application ##
## Version: 0.3.01.266 (20150110) ##
## File: form.py ##
## ##
## For more information about the project, visit ##
## <https://github.com/petervaro/formbuilder>. ##
## Copyright (C) 2014 Peter Varo ##
## ##
## This program is free software: you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License as published by the ##
## Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ##
## See the GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program, most likely a file in the root directory, ##
## called 'LICENSE'. If not, see <http://www.gnu.org/licenses>. ##
## ##
######################################################################## INFO ##
# Import python modules
from collections import OrderedDict
# Module level constants
Q = 'question'
H = 'hint'
T = 'type'
I = 'index'
ENTRIES = Q, H, T
HINT = {'en': 'hint',
'hu': 'tipp'}
#------------------------------------------------------------------------------#
class Form:
def __init__(self, data):
# Set static values
try:
self.HINT = HINT[data['PAGE']['lang']]
except KeyError:
self.HINT = HINT['en']
# Set form title
try:
self.title = data['PAGE']['title']
except KeyError:
self.title = 'Untitled Form'
self.blocks = blocks = OrderedDict()
block = None
# Separate empty sections and questions
for section in data.sections():
section_data = data[section]
# If section is a block-title
if not len(section_data):
blocks[section] = block = []
continue
# If section contains a question
try:
question = {I: section}
block.append(question)
for entry in ENTRIES:
question[entry] = section_data.get(entry, None)
# If block does not exist yet
except AttributeError:
continue
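# Illustrative usage (a sketch; assumes `data` is an INI-style object such as
# a configparser.ConfigParser, which provides the .sections() and mapping
# interface Form relies on, and that 'form.ini' is a hypothetical file):
#   from configparser import ConfigParser
#   data = ConfigParser()
#   data.read('form.ini')
#   form = Form(data)
#   print(form.title, list(form.blocks))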
|
petervaro/formbuilder
|
form.py
|
Python
|
gpl-3.0
| 3,592
|
[
"VisIt"
] |
abbd160043e246a71d792f164db1e603634f4a10836a1b2cb8c684fc1c6de411
|
# ========================== Start Copyright Notice ========================== #
# #
# Copyright 2014 F.D.I.S. #
# This file is part of Kinetic Gunner: Gunner of Angst #
# #
# For the latest version, please visit: #
# https://github.com/CertainlyUncertain/Kinetic-Gunner-Gunner-of-Angst #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# =========================== End Copyright Notice =========================== #
# Networking Manager --------------------------------------------------------- #
class NetMgr:
''' Create networking threads, find clients/servers, sync entities. '''
    def __init__(self, engine):
        ''' Creation. '''
        self.engine = engine
        print "Network Manager Created."
    def init(self):
        ''' Initialization. '''
        print "Network Manager Initialized."
    def crosslink(self):
        ''' Link to other Managers. '''
        print "Network Manager Linked."
    def tick(self, dt):
        ''' Update. '''
        pass
    def stop(self):
        ''' Shut Down. '''
        print "Network Manager Stopped."
# Networking Manager --------------------------------------------------------- #
|
CertainlyUncertain/Kinetic-Gunner-Gunner-of-Angst
|
netMgr.py
|
Python
|
gpl-3.0
| 2,584
|
[
"VisIt"
] |
961208af75fadfb84c66d53f456bd6f2ae7f327e21e03e961ae9478939478a9b
|
from __future__ import print_function, division
import numpy as np
import sys
from scipy.optimize import fmin
from scipy.special import erf
import scipy.stats
from copy import deepcopy
# Constants
kb = 0.00831447215 # kJ/(K*mol)
class Jarz(object):
    '''Jarzynski estimator.
    Estimates the free energy difference from nonequilibrium work values via
    the exponential work average of the Jarzynski equality,
    dG = -kT * ln(<exp(-W/kT)>).
    Parameters
    ----------
    wf : array_like
        array of forward work values.
    wr : array_like
        array of reverse work values.
    T : float or int
        temperature in Kelvin.
    nboots : int
        number of bootstrap samples to use for error estimation.
    nblocks : int
        number of blocks to divide the data into for a block-averaged error
        estimate.
    Attributes
    ----------
    dg_for : float
        free energy estimate from the forward work values.
    dg_rev : float
        free energy estimate from the reverse work values.
    dg_mean : float
        average of the forward and reverse estimates.
    '''
def __init__(self, wf, wr, T, nboots=0, nblocks=1):
self.wf = np.array(wf)
self.wr = np.array(wr)
self.T = float(T)
self.nboots = nboots
self.nblocks = nblocks
# Calculate all Jarz properties available
self.dg_for = self.calc_dg(w=self.wf, c=1.0, T=self.T)
self.dg_rev = -1.0 * self.calc_dg(w=self.wr, c=-1.0, T=self.T)
self.dg_mean = (self.dg_for + self.dg_rev) * 0.5
if nboots > 0:
self.err_boot_for = self.calc_err_boot(w=self.wf, T=self.T,
c=1.0, nboots=nboots)
self.err_boot_rev = self.calc_err_boot(w=self.wr, T=self.T,
c=-1.0, nboots=nboots)
if nblocks > 1:
self.err_blocks_for = self.calc_err_blocks(w=self.wf, c=1.0,
T=self.T,
nblocks=nblocks)
self.err_blocks_rev = self.calc_err_blocks(w=self.wr, c=-1.0,
T=self.T,
nblocks=nblocks)
@staticmethod
def calc_dg(w, T, c):
        '''Calculates the Jarzynski free energy estimate for the work values
        `w` at temperature `T`; `c` is the direction factor (1.0 for forward,
        -1.0 for reverse work values).
        '''
beta = 1./(kb*T)
n = float(len(w))
mexp = 0.0
m = 0.0
m2 = 0.0
for i in w:
mexp = mexp + np.exp(-beta*c*i)
m = m + c*i
m2 = m2 + i*i
mexp = mexp/n
m = m/n
m2 = m2/n
var = (m2-m*m)*(n/(n-1))
# Jarzynski estimator
dg = -kb*T*np.log(mexp)
# Fluctuation-Dissipation estimator
# FIXME: unused atm, remove or return?
dg2 = m - beta*var/2.0
return dg
@staticmethod
def calc_err_boot(w, T, c, nboots):
        '''Calculates the standard error via bootstrap. The work values are
        resampled randomly with replacement multiple (nboots) times,
        and the Jarzynski free energy recalculated for each bootstrap sample.
        The standard error of the estimate is returned as the standard
        deviation of the bootstrapped free energies.
Parameters
----------
w : array_like
work values.
T : float
temperature.
        c : float
            direction factor: 1.0 for forward work values, -1.0 for reverse.
nboots: int
number of bootstrap samples to use for the error estimate.
Returns
-------
err : float
standard error of the mean.
'''
dg_boots = []
n = len(w)
for k in range(nboots):
sys.stdout.write('\r Bootstrap (Std Err): iteration %s/%s'
% (k+1, nboots))
sys.stdout.flush()
boot = np.random.choice(w, size=n, replace=True)
dg_boot = -1.0 * Jarz.calc_dg(boot, T, c)
dg_boots.append(dg_boot)
sys.stdout.write('\n')
err = np.std(dg_boots)
return err
@staticmethod
def calc_err_blocks(w, T, c, nblocks):
'''Calculates the standard error based on a number of blocks the
work values are divided into. It is useful when you run independent
equilibrium simulations, so that you can then use their respective
work values to compute the standard error based on the repeats.
Parameters
----------
w : array_like
array of work values.
T : float
temperature.
        c : float
            direction factor: 1.0 for forward work values, -1.0 for reverse.
nblocks: int
number of blocks to divide the data into. This can be for
instance the number of independent equilibrium simulations
you ran.
'''
dg_blocks = []
# loosely split the arrays
w_split = np.array_split(w, nblocks)
# calculate all dg
for w_block in w_split:
dg_block = -1.0 * Jarz.calc_dg(w_block, T, c)
dg_blocks.append(dg_block)
# get std err
err_blocks = scipy.stats.sem(dg_blocks, ddof=1)
return err_blocks
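# Minimal usage sketch for Jarz (illustrative; wf/wr would normally come from
# nonequilibrium work measurements, here synthetic values are assumed):
#   wf = np.random.normal(10.0, 2.0, size=100)
#   wr = np.random.normal(-8.0, 2.0, size=100)
#   jarz = Jarz(wf, wr, T=298.15, nboots=100)
#   print(jarz.dg_for, jarz.dg_rev, jarz.dg_mean)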
class Crooks(object):
    '''Crooks Gaussian Intersection (CGI) estimator. The forward and reverse
    work values are fitted to Gaussian functions and their intersection is
    taken as the free energy estimate. In some cases, when the two Gaussians
    are very close to each other, the intersection cannot be taken and the
    average of the two Gaussian means is taken as the free energy estimate
    instead. The standard error is by default calculated via parametric
    bootstrap using 1000 samples.
Parameters
----------
wf : array_like
array of forward work values.
wr : array_like
array of reverse work values.
Examples
--------
    >>> cgi = Crooks(wf, wr)
    >>> cgi_dg = cgi.dg
    >>> cgi_err = cgi.err_boot1
Attributes
----------
dg : float
the free energy estimate.
    err_boot1 : float
        standard error of the free energy estimate calculated via parametric
        bootstrap.
    inters_bool : bool
        whether the intersection could be taken. If False, the free energy
        estimate is the average of the two Gaussian means.
Af : float
height of the forward Gaussian.
mf : float
mean of the forward Gaussian.
devf : float
standard deviation of the forward Gaussian.
Ar : float
height of the reverse Gaussian.
mr : float
mean of the reverse Gaussian.
devr : float
standard deviation of the reverse Gaussian.
'''
def __init__(self, wf, wr, nboots=0, nblocks=1):
# inputs
self.wf = np.array(wf)
self.wr = np.array(wr)
self.nboots = nboots
self.nblocks = nblocks
# params of the gaussians
self.mf, self.devf, self.Af = data2gauss(wf)
self.mr, self.devr, self.Ar = data2gauss(wr)
# Calculate Crooks properties
self.dg, self.inters_bool = self.calc_dg(wf=self.wf, wr=self.wr)
self.err_boot1 = self.calc_err_boot1(m1=self.mf, s1=self.devf,
n1=len(wf), m2=self.mr,
s2=self.devr, n2=len(wr),
nboots=1000)
if nboots > 0:
self.err_boot2 = self.calc_err_boot2(wf=self.wf, wr=self.wr,
nboots=nboots)
if nblocks > 1:
self.err_blocks = self.calc_err_blocks(self.wf, self.wr, nblocks)
@staticmethod
def calc_dg(wf, wr):
'''Calculates the free energy difference using the Crooks Gaussian
Intersection method. It finds the intersection of two Gaussian
functions. If the intersection cannot be computed, the average of
the two Gaussian locations is returned.
Parameters
----------
wf : array_like
array of forward work values.
wr : array_like
array of reverse work values.
Returns
-------
float
location of the intersection.
bool
whether the intersection could be calculated. If the intersection
was calculated as expected a True value is returned.
            If the Gaussians are too close to each other, the intersection
            cannot be calculated and a False value is returned; in this case,
            the first float value returned is the average of the Gaussian means.
'''
m1, s1, A1 = data2gauss(wf)
m2, s2, A2 = data2gauss(wr)
p1 = m1/s1**2-m2/s2**2
p2 = np.sqrt(1/(s1**2*s2**2)*(m1-m2)**2+2*(1/s1**2-1/s2**2)*np.log(s2/s1))
p3 = 1/s1**2-1/s2**2
x1 = (p1+p2)/p3
x2 = (p1-p2)/p3
# determine which solution to take
        if m1 < x1 < m2 or m2 < x1 < m1:
dg = x1
return dg, True
        elif m1 < x2 < m2 or m2 < x2 < m1:
dg = x2
return dg, True
else:
# we do not take the intersection but the average of the means
dg = (m1 + m2) * 0.5
return dg, False
    # Possible change of behaviour compared to the original script:
    # here it is not determined in advance whether to take the intersection
    # or the mean; instead, for each bootstrap sample, if the intersection
    # cannot be taken then the mean is used automatically.
@staticmethod
def calc_err_boot1(m1, s1, n1, m2, s2, n2, nboots=1000):
'''Calculates the standard error of the Crooks Gaussian Intersection
via parametric bootstrap. Given the parameters of the forward and
reverse Gaussian distributions, multiple (nboots) bootstrap samples
are built by random sampling from these two Gaussian distributions.
The CGI free energy is then calculated for each bootstrap sample
(forward and reverse Gaussians). The standard error of the estimate
is returned as the standard deviation of the bootstrapped free
energies.
Parameters
----------
m1 : float
mean of the forward Gaussian.
s1 : float
standard deviation of the forward Gaussian.
n1 : int
number of work values to which the first Gaussian was fit.
m2 : float
mean of the reverse Gaussian.
s2 : float
standard deviation of the reverse Gaussian.
n2 : int
number of work values to which the second Gaussian was fit.
nboots: int
number of bootstrap samples to use for the error estimate.
Parametric bootstrap is used where work values are resampled from
two Gaussians.
Returns
-------
float
standard error of the mean.
'''
dg_boots = []
for k in range(nboots):
bootA = np.random.normal(loc=m1, scale=s1, size=n1)
bootB = np.random.normal(loc=m2, scale=s2, size=n2)
dg_boot, _ = Crooks.calc_dg(bootA, bootB)
dg_boots.append(dg_boot)
err = np.std(dg_boots)
return err
@staticmethod
def calc_err_boot2(wf, wr, nboots):
'''Calculates the standard error of the Crooks Gaussian Intersection
via non-parametric bootstrap. The work values are resampled randomly
with replacement multiple (nboots) times, and the CGI free energy
recalculated for each bootstrap samples. The standard error of
the estimate is returned as the standard deviation of the bootstrapped
free energies.
Parameters
----------
wf : array_like
array of forward work values.
wr : array_like
array of reverse work values.
nboots: int
number of bootstrap samples to use for the error estimate.
Returns
-------
err : float
standard error of the mean.
'''
nf = len(wf)
nr = len(wr)
dg_boots = []
for k in range(nboots):
sys.stdout.write('\r Bootstrap (Std Err): iteration %s/%s'
% (k+1, nboots))
sys.stdout.flush()
bootA = np.random.choice(wf, size=nf, replace=True)
bootB = np.random.choice(wr, size=nr, replace=True)
dg_boot, _ = Crooks.calc_dg(bootA, bootB)
dg_boots.append(dg_boot)
sys.stdout.write('\n')
err = np.std(dg_boots)
return err
@staticmethod
def calc_err_blocks(wf, wr, nblocks):
'''Calculates the standard error based on a number of blocks the
work values are divided into. It is useful when you run independent
equilibrium simulations, so that you can then use their respective
work values to compute the standard error based on the repeats.
Parameters
----------
wf : array_like
array of forward work values.
wr : array_like
array of reverse work values.
nblocks: int
number of blocks to divide the data into. This can be for
instance the number of independent equilibrium simulations
you ran.
'''
dg_blocks = []
# loosely split the arrays
wf_split = np.array_split(wf, nblocks)
wr_split = np.array_split(wr, nblocks)
# calculate all dg
for wf_block, wr_block in zip(wf_split, wr_split):
dg_block, _ = Crooks.calc_dg(wf_block, wr_block)
dg_blocks.append(dg_block)
# get std err
err_blocks = scipy.stats.sem(dg_blocks, ddof=1)
return err_blocks
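# Minimal usage sketch for Crooks (illustrative, with the synthetic wf/wr
# from the Jarz sketch above):
#   cgi = Crooks(wf, wr, nboots=100)
#   print(cgi.dg, cgi.inters_bool, cgi.err_boot1)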
class BAR(object):
    '''Bennett acceptance ratio (BAR) estimator.
    Estimates the free energy difference by numerically solving the BAR
    self-consistent equation, which optimally combines the forward and
    reverse work values.
    Parameters
    ----------
    wf : array_like
        array of forward work values.
    wr : array_like
        array of reverse work values.
    T : float or int
        temperature in Kelvin.
    nboots : int
        number of bootstrap samples to use for error estimation.
    nblocks : int
        number of blocks to divide the data into for a block-averaged error
        estimate.
    '''
def __init__(self, wf, wr, T, nboots=0, nblocks=1):
self.wf = np.array(wf)
self.wr = np.array(wr)
self.T = float(T)
self.nboots = nboots
self.nblocks = nblocks
self.nf = len(wf)
self.nr = len(wr)
self.beta = 1./(kb*self.T)
self.M = kb * self.T * np.log(float(self.nf) / float(self.nr))
# Calculate all BAR properties available
self.dg = self.calc_dg(self.wf, self.wr, self.T)
self.err = self.calc_err(self.dg, self.wf, self.wr, self.T)
if nboots > 0:
self.err_boot = self.calc_err_boot(self.wf, self.wr, nboots,
self.T)
self.conv = self.calc_conv(self.dg, self.wf, self.wr, self.T)
if nboots > 0:
self.conv_err_boot = self.calc_conv_err_boot(self.dg, self.wf,
self.wr, nboots,
self.T)
if nblocks > 1:
self.err_blocks = self.calc_err_blocks(self.wf, self.wr, nblocks,
self.T)
@staticmethod
def calc_dg(wf, wr, T):
'''Estimates and returns the free energy difference.
Parameters
----------
wf : array_like
array of forward work values.
wr : array_like
array of reverse work values.
T : float
temperature
Returns
----------
dg : float
the BAR free energy estimate.
'''
nf = float(len(wf))
nr = float(len(wr))
beta = 1./(kb*T)
M = kb * T * np.log(nf/nr)
def func(x, wf, wr):
sf = 0
for v in wf:
sf += 1./(1+np.exp(beta*(M+v-x)))
sr = 0
for v in wr:
sr += 1./(1+np.exp(-beta*(M+v-x)))
r = sf-sr
return r**2
avA = np.average(wf)
avB = np.average(wr)
x0 = (avA+avB)/2.
dg = fmin(func, x0=x0, args=(wf, wr), disp=0)
return float(dg)
@staticmethod
def calc_err(dg, wf, wr, T):
'''Calculates the analytical error estimate.
Parameters
----------
dg : float
the BAR free energy estimate
wf : array_like
array of forward work values.
wr : array_like
array of reverse work values.
T : float
temperature
'''
nf = float(len(wf))
nr = float(len(wr))
beta = 1./(kb*T)
M = kb * T * np.log(nf/nr)
err = 0
for v in wf:
err += 1./(2+2*np.cosh(beta * (M+v-dg)))
for v in wr:
err += 1./(2+2*np.cosh(beta * (M+v-dg)))
N = nf + nr
err /= float(N)
tot = 1/(beta**2*N)*(1./err-(N/nf + N/nr))
err = float(np.sqrt(tot))
return err
@staticmethod
def calc_err_boot(wf, wr, nboots, T):
'''Calculates the error by bootstrapping.
Parameters
----------
wf : array_like
array of forward work values.
wr : array_like
array of reverse work values.
T : float
temperature
nboots: int
number of bootstrap samples.
'''
nf = len(wf)
nr = len(wr)
dg_boots = []
for k in range(nboots):
sys.stdout.write('\r Bootstrap (Std Err): iteration %s/%s'
% (k+1, nboots))
sys.stdout.flush()
bootA = np.random.choice(wf, size=nf, replace=True)
bootB = np.random.choice(wr, size=nr, replace=True)
dg_boot = BAR.calc_dg(bootA, bootB, T)
dg_boots.append(dg_boot)
sys.stdout.write('\n')
err_boot = np.std(dg_boots)
return err_boot
@staticmethod
def calc_err_blocks(wf, wr, nblocks, T):
'''Calculates the standard error based on a number of blocks the
work values are divided into. It is useful when you run independent
equilibrium simulations, so that you can then use their respective
work values to compute the standard error based on the repeats.
Parameters
----------
wf : array_like
array of forward work values.
wr : array_like
array of reverse work values.
T : float
temperature
nblocks: int
number of blocks to divide the data into. This can be for
instance the number of independent equilibrium simulations
you ran.
'''
dg_blocks = []
# loosely split the arrays
wf_split = np.array_split(wf, nblocks)
wr_split = np.array_split(wr, nblocks)
# calculate all dg
for wf_block, wr_block in zip(wf_split, wr_split):
dg_block = BAR.calc_dg(wf_block, wr_block, T)
dg_blocks.append(dg_block)
# get std err
err_blocks = scipy.stats.sem(dg_blocks, ddof=1)
return err_blocks
@staticmethod
def calc_conv(dg, wf, wr, T):
'''Evaluates BAR convergence as described in Hahn & Then, Phys Rev E
(2010), 81, 041117. Returns a value between -1 and 1: the closer this
value to zero the better the BAR convergence.
Parameters
----------
dg : float
the BAR free energy estimate
wf : array_like
array of forward work values.
wr : array_like
array of reverse work values.
T : float
temperature
'''
wf = np.array(wf)
wr = np.array(wr)
beta = 1./(kb*T)
nf = len(wf)
nr = len(wr)
N = float(nf + nr)
ratio_alpha = float(nf)/N
ratio_beta = float(nr)/N
bf = 1.0/(ratio_beta + ratio_alpha * np.exp(beta*(wf-dg)))
tf = 1.0/(ratio_alpha + ratio_beta * np.exp(beta*(-wr+dg)))
Ua = (np.mean(tf) + np.mean(bf))/2.0
Ua2 = (ratio_alpha * np.mean(np.power(tf, 2)) +
ratio_beta * np.mean(np.power(bf, 2)))
conv = (Ua-Ua2)/Ua
return conv
@staticmethod
def calc_conv_err_boot(dg, wf, wr, nboots, T):
nf = len(wf)
nr = len(wr)
conv_boots = []
for k in range(nboots):
sys.stdout.write('\r Bootstrap (Conv): '
'iteration %s/%s' % (k+1, nboots))
sys.stdout.flush()
bootA = np.random.choice(wf, size=nf, replace=True)
bootB = np.random.choice(wr, size=nr, replace=True)
conv_boot = BAR.calc_conv(dg, bootA, bootB, T)
conv_boots.append(conv_boot)
sys.stdout.write('\n')
err = np.std(conv_boots)
return err
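# Minimal usage sketch for BAR (illustrative, with the synthetic wf/wr from
# the Jarz sketch above):
#   bar = BAR(wf, wr, T=298.15, nboots=100)
#   print(bar.dg, bar.err, bar.conv)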
# ==============================================================================
# FUNCTIONS
# ==============================================================================
def ks_norm_test(data, alpha=0.05, refks=None):
'''Performs a Kolmogorov-Smirnov test of normality.
Parameters
----------
data : array_like
a one-dimensional array of values. This is the distribution tested
for normality.
    alpha : float
        significance level of the statistic. Default is 0.05.
    refks : list of (float, float) tuples, optional
        precomputed reference table of (lambda, Q) values; computed with
        ksref() when not provided.
Returns
-------
Q : float
lam0 : float
check : float
bOk : bool
'''
def ksref():
f = 1
potent = 10000
lamb = np.arange(0.25, 2.5, 0.001)
q = np.zeros(len(lamb), float)
res = []
for k in range(-potent, potent):
q = q + f*np.exp(-2.0*(k**2)*(lamb**2))
f = -f
for i in range(len(lamb)):
res.append((lamb[i], q[i]))
return res
def ksfunc(lamb):
f = 1
potent = 10000
q = 0
for k in range(-potent, potent):
q = q + f*np.exp(-2.0*(k**2)*(lamb**2))
f *= -1
return q
def edf(dg_data):
edf_ = []
ndata = []
data = deepcopy(dg_data)
data.sort()
N = float(len(data))
cnt = 0
for item in data:
cnt += 1
edf_.append(cnt/N)
ndata.append(item)
ndata = np.array(ndata)
edf_ = np.array(edf_)
return ndata, edf_
def cdf(dg_data):
data = deepcopy(dg_data)
data.sort()
mean = np.average(data)
sig = np.std(data)
cdf = 0.5*(1+erf((data-mean)/float(sig*np.sqrt(2))))
return cdf
N = len(data)
nd, ed = edf(data)
cd = cdf(data)
siglev = 1-alpha
dval = []
for i, val in enumerate(ed):
d = abs(val-cd[i])
dval.append(d)
if i:
d = abs(ed[i-1]-cd[i])
dval.append(d)
dmax = max(dval)
check = np.sqrt(N)*dmax
if not refks:
refks = ksref()
    # materialize to a list: on Python 3, filter() returns an iterator,
    # which cannot be indexed below
    lst = [x for x in refks if x[1] > siglev]
lam0 = lst[0][0]
if check >= lam0:
bOk = False
else:
bOk = True
q = ksfunc(check)
return (1-q), lam0, check, bOk
def data2gauss(data):
'''Takes a one dimensional array and fits a Gaussian.
Returns
-------
float
mean of the distribution.
float
standard deviation of the distribution.
float
height of the curve's peak.
'''
m = np.average(data)
dev = np.std(data)
A = 1./(dev*np.sqrt(2*np.pi))
return m, dev, A
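# Illustrative check of the helpers above on synthetic data (a sketch only):
#   sample = np.random.normal(0.0, 1.0, size=200)
#   m, dev, A = data2gauss(sample)
#   Q, lam0, check, bOk = ks_norm_test(sample)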
|
dseeliger/pmx
|
pmx/estimators.py
|
Python
|
lgpl-3.0
| 23,063
|
[
"Gaussian"
] |
6daca56247c21fa6b23ce70a28239979a8cfab11d4d3de0d1061abdd63ee9c58
|
# Copyright 2019 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from collections import defaultdict
from elasticsearch import Elasticsearch
from flask import current_app as app
from flask import jsonify
from flask import request
import ast
import json
import yaml
import logging
from rhoci.models.job import Job
from rhoci.jenkins.osp import get_release
LOG = logging.getLogger(__name__)
from rhoci.api import bp # noqa
PROJECTION = {'name': 1, 'last_build': 1, 'release': 1, 'last_successful_build': 1}
@bp.route('/jobs', methods=['GET', 'POST'])
def jobs(query_str=None):
"""All jobs API route."""
q_str = request.args.get('query_str', default={})
if q_str:
        # parse the query dict safely instead of eval() on user input
        query_str = ast.literal_eval(q_str)
else:
query_str = {}
results = {'data': Job.find(query_str=query_str, projection=PROJECTION)}
return jsonify(results)
@bp.route('/jobs/filtered', methods=['GET', 'POST'])
@bp.route('/jobs/filtered?filters=<filters>', methods=['GET', 'POST'])
def get_filtered_jobs(filters=None):
filters = request.args.get('filters')
if filters and filters != "undefined":
filters_dict = json.loads(filters)
else:
filters_dict = {}
results = {'data': []}
es = Elasticsearch(app.config['custom']['elk']['es']['url'])
body = {
"query": {
"bool": {}},
"size": 0,
"aggs": {
"jobs": {
"terms": {"field": "job_name.keyword",
"size": 1000},
"aggs": {
"builds": {
"terms": {"field": "build_num"},
"aggs": {
"status": {
"terms": {"field": "build_result.keyword"}
}
}
}}}}}
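    # The body above is a three-level terms aggregation: jobs are bucketed by
    # job_name.keyword, each job's builds by build_num, and each build's
    # results by build_result.keyword; "size": 0 suppresses raw hits so only
    # the aggregation buckets are returned.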
if filters_dict:
body["query"]["bool"]["filter"] = []
        filters_modified = {"{}.keyword".format(k): v for k, v in filters_dict.items()}
        for field, value in filters_modified.items():
            body["query"]["bool"]["filter"].append({"term": {field: value}})
res = es.search(index="logstash", body=body)
    for job in res['aggregations']['jobs']['buckets']:
        if job['builds']['buckets'] and job['builds']['buckets'][-1]['status']['buckets']:
            status = job['builds']['buckets'][-1]['status']['buckets'][-1]['key']
        else:
            status = "None"
        results['data'].append({'job_name': job['key'],
                                'build_number': int(job['builds']['buckets'][-1]['key']),
                                'status': status})
    return jsonify(results)
@bp.route('/jobs/<DFG_name>/<status>')
@bp.route('/jobs/DFG=<DFG_name>')
@bp.route('/jobs/<job_name>')
@bp.route('/jobs/all')
def get_jobs(DFG_name=None, squad_name=None,
component_name=None, job_name=None, status=None):
"""Returns jobs."""
jobs = defaultdict(dict)
results = {'data': []}
es = Elasticsearch(app.config['custom']['elk']['es']['url'])
body = {
"query": {
"bool": {
"must": [{"exists": {"field": "build_result.keyword"}} ],
# "filter": [
# { "term": { "DFG.keyword": DFG_name}}
#]
}},
"size": 0,
"aggs": {
"jobs": {
"terms": {"field": "job_name.keyword",
"size": 1000},
"aggs": {
"builds": {
"terms": {"field": "build_num"},
"aggs": {
"status": {
"terms": {"field": "build_result.keyword"}
}
}
}}}}}
    if DFG_name and status:
        body["query"]["bool"]["filter"] = [{"term": {"DFG.keyword": DFG_name}}]
        res = es.search(index="logstash", body=body)
        for job in res['aggregations']['jobs']['buckets']:
            last_build = job['builds']['buckets'][-1]
            if last_build['status']['buckets'][-1]['key'] == status:
                results['data'].append({'job_name': job['key'],
                                        'build_number': int(last_build['key']),
                                        'status': status})
    else:
        if DFG_name:
            body["query"]["bool"]["filter"] = [{"term": {"DFG.keyword": DFG_name}}]
        res = es.search(index="logstash", body=body)
        for job in res['aggregations']['jobs']['buckets']:
            last_build = job['builds']['buckets'][-1]
            results['data'].append({'job_name': job['key'],
                                    'build_number': int(last_build['key']),
                                    'status': last_build['status']['buckets'][-1]['key']})
    return jsonify(results)
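# Illustrative request (host and URL prefix depend on how the blueprint is
# registered; the filter field is an example):
#   GET /jobs/filtered?filters={"DFG": "network"}
# -> {"data": [{"job_name": ..., "build_number": ..., "status": ...}]}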
|
bregman-arie/rhoci
|
rhoci/api/job.py
|
Python
|
apache-2.0
| 5,401
|
[
"Elk"
] |
a14573d109f95b759aea077c2efc54664db345eb27137bf7c0900e91470ca885
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2022 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Functions and classes related to array handling
"""
__all__ = ["createXYs",
"extendArr",
"makeRadialMatrix",
"ratioRange",
"shuffleArray",
"val2array",
"array2pointer",
"createLumPattern"]
import numpy
import ctypes
def createXYs(x, y=None):
"""Create an Nx2 array of XY values including all combinations of the
x and y values provided.
>>> createXYs(x=[1, 2, 3], y=[4, 5, 6])
array([[1, 4],
[2, 4],
[3, 4],
[1, 5],
[2, 5],
[3, 5],
[1, 6],
[2, 6],
[3, 6]])
>>> createXYs(x=[1, 2, 3]) # assumes y == x
array([[1, 1],
[2, 1],
[3, 1],
[1, 2],
[2, 2],
[3, 2],
[1, 3],
[2, 3],
[3, 3]])
"""
if y is None:
y = x
xs = numpy.resize(x, len(x) * len(y)) # [1,2,3, 1,2,3, 1,2,3]
ys = numpy.repeat(y, len(x)) # [1,1,1 ,2,2,2, 3,3,3]
return numpy.vstack([xs, ys]).transpose()
def extendArr(inArray, newSize):
"""Takes a numpy array and returns it padded with zeros
to the necessary size
>>> extendArr([1, 2, 3], 5)
array([1, 2, 3, 0, 0])
"""
if type(inArray) in [tuple, list]:
inArray = numpy.asarray(inArray)
    newArr = numpy.zeros(newSize, inArray.dtype)
    # build an index tuple such as (slice(0, 4), slice(0, 3)), i.e. the
    # equivalent of newArr[0:4, 0:3] = inArray, without exec()
    newArr[tuple(slice(0, dim) for dim in inArray.shape)] = inArray
    return newArr
def makeRadialMatrix(matrixSize, center=(0.0, 0.0), radius=1.0):
"""DEPRECATED: please use psychopy.filters.makeRadialMatrix instead
"""
from psychopy.visual import filters
return filters.makeRadialMatrix(matrixSize, center, radius)
def ratioRange(start, nSteps=None, stop=None,
stepRatio=None, stepdB=None, stepLogUnits=None):
"""Creates a array where each step is a constant ratio
rather than a constant addition.
Specify *start* and any 2 of, *nSteps*, *stop*, *stepRatio*,
*stepdB*, *stepLogUnits*
>>> ratioRange(1,nSteps=4,stop=8)
array([ 1., 2., 4., 8.])
>>> ratioRange(1,nSteps=4,stepRatio=2)
array([ 1., 2., 4., 8.])
>>> ratioRange(1,stop=8,stepRatio=2)
array([ 1., 2., 4., 8.])
"""
badRange = "Can't calculate ratio ranges on negatives or zero"
if start <= 0:
raise RuntimeError(badRange)
if stepdB is not None:
stepRatio = 10.0 ** (stepdB / 20.0) # dB = 20*log10(ratio)
if stepLogUnits is not None:
stepRatio = 10.0 ** stepLogUnits # logUnit = log10(ratio)
if stepRatio is not None and nSteps is not None:
factors = stepRatio ** numpy.arange(nSteps, dtype='d')
output = start * factors
elif nSteps is not None and stop is not None:
if stop <= 0:
raise RuntimeError(badRange)
lgStart = numpy.log10(start)
lgStop = numpy.log10(stop)
lgStep = (lgStop - lgStart) / (nSteps - 1)
lgArray = numpy.arange(lgStart, lgStop + lgStep, lgStep)
# if the above is a badly rounded float it may have one extra entry
if len(lgArray) > nSteps:
lgArray = lgArray[:-1]
output = 10 ** lgArray
elif stepRatio is not None and stop is not None:
thisVal = float(start)
outList = []
while thisVal < stop:
outList.append(thisVal)
thisVal *= stepRatio
output = numpy.asarray(outList)
else:
# if any of the conditions above are not satisfied, throw this error.
raise ValueError('Invalid input parameters.')
return output
def shuffleArray(inArray, shuffleAxis=-1, seed=None):
"""DEPRECATED: use `numpy.random.shuffle`
"""
# arrAsList = shuffle(list(inArray))
# return numpy.array(arrAsList)
rng = numpy.random.default_rng(seed=seed)
inArray = numpy.array(inArray, 'O') # convert to array if necess
# create a random array of the same shape
rndArray = rng.random(inArray.shape)
# and get the arguments that would sort it
newIndices = numpy.argsort(rndArray, shuffleAxis)
# return the array with the sorted random indices
return numpy.take(inArray, newIndices)
def val2array(value, withNone=True, withScalar=True, length=2):
"""Helper function: converts different input to a numpy array.
Raises informative error messages if input is invalid.
withNone: True/False. should 'None' be passed?
withScalar: True/False. is a scalar an accepted input?
Will be converted to array of this scalar
length: False / 2 / 3. Number of elements input should have or be
converted to. Might be False (do not accept arrays or convert to such)
"""
if value is None:
if withNone:
return None
else:
raise ValueError('Invalid parameter. None is not accepted as '
'value.')
value = numpy.array(value, float)
    if numpy.prod(value.shape) == 1:
if withScalar:
# e.g. 5 becomes array([5.0, 5.0, 5.0]) for length=3
return numpy.repeat(value, length)
else:
msg = ('Invalid parameter. Single numbers are not accepted. '
'Should be tuple/list/array of length %s')
raise ValueError(msg % str(length))
elif value.shape[-1] == length:
return numpy.array(value, float)
else:
msg = 'Invalid parameter. Should be length %s but got length %s.'
raise ValueError(msg % (str(length), str(len(value))))
def array2pointer(arr, dtype=None):
"""Convert a Numpy array to a `ctypes` pointer.
Arrays are checked if they are contiguous before conversion, if not, they
will be converted to contiguous arrays.
Parameters
----------
arr : ndarray
N-dimensions array to convert, should be contiguous (C-ordered).
dtype : str or dtype, optional
Data type for the array pointer. If the data type of the array does not
match `dtype`, it will be converted to `dtype` prior to using it. If
`None` is specified, the data type for the pointer will be implied from
the input array type.
Returns
-------
ctypes.POINTER
Pointer to the first value of the array.
"""
dtype = arr.dtype if dtype is None else numpy.dtype(dtype).type
# convert to ctypes, also we ensure the array is contiguous
return numpy.ascontiguousarray(arr, dtype=dtype).ctypes.data_as(
ctypes.POINTER(numpy.ctypeslib.as_ctypes_type(dtype)))
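# Illustrative round trip for array2pointer (names are local to this sketch):
#   a = numpy.array([1.0, 2.0, 3.0], dtype=numpy.float32)
#   p = array2pointer(a)
#   p[0]  # -> 1.0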
def snapto(values, points):
"""
    Snap each value in `values` to its closest equivalent in `points`,
    returning an array in which every element of `values` has been replaced
    by the closest value in `points`.
Parameters
----------
values : list, tuple or numpy.ndarray
Array of values to be snapped to `points`
points : list, tuple or numpy.ndarray
Array of values to be snapped to
Returns
-------
snapped
Array of values, each corresponds to a value in `x` and is the closest value in `points`.
Examples
--------
Snap labels on a Slider to the x positions of each tick::
labelPositions = [-1, -2/3, -1/3, 1/3, 2/3, 1]
tickPositions = [-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1]
        snappedLabelPositions = snapto(values=labelPositions, points=tickPositions)
        assert snappedLabelPositions == [-1, -0.6, -0.4, 0.4, 0.6, 1]
"""
# Force values to 1d numpy arrays, though keep track of original shape of x
ogShape = numpy.asarray(values).shape
values = numpy.asarray(values).reshape((1, -1))
points = numpy.asarray(points).reshape((1, -1))
# Get sort order of values and points
valuesi = numpy.argsort(values[0])
pointsi = numpy.argsort(points[0])
# Shift values indices to sit evenly within points indices
valuesi -= min(pointsi)
valuesi = valuesi / max(valuesi) * max(pointsi)
valuesi = valuesi.round().astype(int)
# Get indices of points corresponding to each x value
i = pointsi[valuesi]
# Get corresponding points
snapped1d = points[0, i]
# Reshape to original shape of x
snapped = snapped1d.reshape(ogShape)
return snapped
def createLumPattern(patternType, res, texParams=None, maskParams=None):
"""Create a luminance (single channel) defined pattern.
Parameters
----------
patternType : str or None
Pattern to generate. Value may be one of: 'sin', 'sqr', 'saw', 'tri',
'sinXsin', 'sqrXsqr', 'circle', 'gauss', 'cross', 'radRamp' or
        'raisedCos'. If `None`, 'none', 'None' or 'color' is specified, a
        1x1 array of ones is returned.
res : int
Resolution for the texture in texels.
texParams : dict or None
        Additional parameters to control texture generation. Not currently
        used, but may be in the future (e.g. settings like duty-cycle).
        Passing values to this parameter currently does nothing.
maskParams : dict or None
Additional parameters to control how the texture's mask is applied.
Returns
-------
ndarray
Array of normalized intensity values containing the desired pattern
specified by `mode`.
Examples
--------
Create a gaussian bump luminance map with resolution 1024x1024 and standard
deviation of 0.5::
res = 1024
maskParams = {'sd': 0.5}
intensity = createLumPattern('gauss', res, None, maskParams)
"""
# This code was originally in `TextureMixin._createTexture`, but moved here
# to clean up that class and to provide a reusable way of generating these
# textures.
# Check and sanitize parameters passed to this function before generating
# anything with them.
if res <= 0:
raise ValueError('invalid value for parameter `res`, must be >0')
# parameters to control texture generation, unused but roughed in for now
allTexParams = {}
if isinstance(texParams, dict): # specified, override defaults if so
allTexParams.update(texParams)
elif texParams is None: # if not specified, use empty dict
pass # nop for now, change to `allTexParams = {}` when needed
else:
raise TypeError('parameter `texParams` must be type `dict` or `None`')
    # mask parameters: additional settings controlling how masks are applied
allMaskParams = {'fringeWidth': 0.2, 'sd': 3}
if isinstance(maskParams, dict): # specified, override defaults if so
allMaskParams.update(maskParams)
elif maskParams is None: # if not specified, use empty dict
allMaskParams = {}
else:
raise TypeError('parameter `maskParams` must be type `dict` or `None`')
    # a local copy of `makeRadialMatrix` from filters, duplicated here to
    # avoid importing all of visual just to test this function
def _makeRadialMatrix(matrixSize, center=(0.0, 0.0), radius=1.0):
if type(radius) in [int, float]:
radius = [radius, radius]
# NB need to add one step length because
yy, xx = numpy.mgrid[0:matrixSize, 0:matrixSize]
xx = ((1.0 - 2.0 / matrixSize * xx) + center[0]) / radius[0]
yy = ((1.0 - 2.0 / matrixSize * yy) + center[1]) / radius[1]
rad = numpy.sqrt(numpy.power(xx, 2) + numpy.power(yy, 2))
return rad
# here is where we generate textures
pi = numpy.pi
if patternType in (None, "none", "None", "color"):
res = 1
intensity = numpy.ones([res, res], numpy.float32)
elif patternType == "sin":
# NB 1j*res is a special mgrid notation
onePeriodX, onePeriodY = numpy.mgrid[0:res, 0:2 * pi:1j * res]
intensity = numpy.sin(onePeriodY - pi / 2)
elif patternType == "sqr": # square wave (symmetric duty cycle)
# NB 1j*res is a special mgrid notation
onePeriodX, onePeriodY = numpy.mgrid[0:res, 0:2 * pi:1j * res]
sinusoid = numpy.sin(onePeriodY - pi / 2)
intensity = numpy.where(sinusoid > 0, 1, -1)
elif patternType == "saw":
intensity = \
numpy.linspace(-1.0, 1.0, res, endpoint=True) * numpy.ones([res, 1])
elif patternType == "tri":
# -1:3 means the middle is at +1
intens = numpy.linspace(-1.0, 3.0, res, endpoint=True)
# remove from 3 to get back down to -1
intens[res // 2 + 1:] = 2.0 - intens[res // 2 + 1:]
intensity = intens * numpy.ones([res, 1]) # make 2D
elif patternType == "sinXsin":
# NB 1j*res is a special mgrid notation
onePeriodX, onePeriodY = numpy.mgrid[0:2 * pi:1j * res,
0:2 * pi:1j * res]
intensity = \
numpy.sin(onePeriodX - pi / 2) * numpy.sin(onePeriodY - pi / 2)
elif patternType == "sqrXsqr":
# NB 1j*res is a special mgrid notation
onePeriodX, onePeriodY = numpy.mgrid[0:2 * pi:1j * res,
0:2 * pi:1j * res]
sinusoid = \
numpy.sin(onePeriodX - pi / 2) * numpy.sin(onePeriodY - pi / 2)
intensity = numpy.where(sinusoid > 0, 1, -1)
elif patternType == "circle":
rad = _makeRadialMatrix(res)
intensity = (rad <= 1) * 2 - 1
elif patternType == "gauss":
rad = _makeRadialMatrix(res)
        # 3 SDs by the edge of the stimulus
try:
maskStdev = allMaskParams['sd']
except KeyError:
raise ValueError(
"Mask parameter 'sd' not provided but is required by "
"`mode='gauss'`")
invVar = (1.0 / maskStdev) ** 2.0
intensity = numpy.exp(-rad ** 2.0 / (2.0 * invVar)) * 2 - 1
elif patternType == "cross":
X, Y = numpy.mgrid[-1:1:1j * res, -1:1:1j * res]
tfNegCross = (((X < -0.2) & (Y < -0.2)) |
((X < -0.2) & (Y > 0.2)) |
((X > 0.2) & (Y < -0.2)) |
((X > 0.2) & (Y > 0.2)))
# tfNegCross == True at places where the cross is transparent,
# i.e. the four corners
intensity = numpy.where(tfNegCross, -1, 1)
elif patternType == "radRamp": # a radial ramp
rad = _makeRadialMatrix(res)
intensity = 1 - 2 * rad
# clip off the corners (circular)
intensity = numpy.where(rad < -1, intensity, -1)
elif patternType == "raisedCos": # A raised cosine
hammingLen = 1000 # affects the 'granularity' of the raised cos
rad = _makeRadialMatrix(res)
intensity = numpy.zeros_like(rad)
intensity[numpy.where(rad < 1)] = 1
maskFringeWidth = allMaskParams['fringeWidth']
raisedCosIdx = numpy.where(
[numpy.logical_and(rad <= 1, rad >= 1 - maskFringeWidth)])[1:]
# Make a raised_cos (half a hamming window):
raisedCos = numpy.hamming(hammingLen)[:hammingLen // 2]
raisedCos -= numpy.min(raisedCos)
raisedCos /= numpy.max(raisedCos)
# Measure the distance from the edge - this is your index into the
# hamming window:
dFromEdge = numpy.abs(
(1 - maskFringeWidth) - rad[raisedCosIdx])
dFromEdge /= numpy.max(dFromEdge)
dFromEdge *= numpy.round(hammingLen / 2)
# This is the indices into the hamming (larger for small distances
# from the edge!):
portionIdx = (-1 * dFromEdge).astype(int)
# Apply the raised cos to this portion:
intensity[raisedCosIdx] = raisedCos[portionIdx]
# Scale it into the interval -1:1:
intensity = intensity - 0.5
intensity /= numpy.max(intensity)
# Sometimes there are some remaining artifacts from this process,
# get rid of them:
artifactIdx = numpy.where(
numpy.logical_and(intensity == -1, rad < 0.99))
intensity[artifactIdx] = 1
artifactIdx = numpy.where(
numpy.logical_and(intensity == 1, rad > 0.99))
intensity[artifactIdx] = 0
else:
raise ValueError("invalid keyword or value for parameter `patternType`")
return intensity
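# Illustrative call (a sketch): build a 256x256 raised-cosine patch with a
# wider-than-default fringe:
#   patch = createLumPattern('raisedCos', 256, maskParams={'fringeWidth': 0.3})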
if __name__ == "__main__":
pass
|
psychopy/psychopy
|
psychopy/tools/arraytools.py
|
Python
|
gpl-3.0
| 16,591
|
[
"Gaussian"
] |
19e07d7b4de1dea4747f76add63cba5cce70f0bf40719f5fed6ff9094cbafa71
|
#!/usr/bin/env python
import lxml.etree as ET
import subprocess
import sys
import os
import shutil
import argparse
import datetime
import time
import csv
import uuid
from glob import glob
from ififuncs import create_csv
from ififuncs import append_csv
from ififuncs import send_gmail
from ififuncs import hashlib_manifest
from ififuncs import diff_textfiles
from ififuncs import make_manifest
from ififuncs import generate_log
from ififuncs import make_desktop_logs_dir
from premis import make_premis
from premis import write_premis
from premis import make_agent
from premis import make_event
from premis import setup_xml
from premis import create_unit
'''
Events:
md5 manifest created of source
framemd5 of source
tiff2dpx
framemd5 output
manifest of output
'''
def set_environment(logfile):
env_dict = os.environ.copy()
# https://github.com/imdn/scripts/blob/0dd89a002d38d1ff6c938d6f70764e6dd8815fdd/ffmpy.py#L272
env_dict['FFREPORT'] = 'file={}:level=48'.format(logfile)
return env_dict
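# e.g. set_environment('/tmp/ffmpeg.log') (hypothetical path) returns a copy
# of os.environ with FFREPORT=file=/tmp/ffmpeg.log:level=48, which makes the
# ffmpeg subprocess write a verbose report to that file.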
def make_framemd5(directory, container, log_filename_alteration):
os.chdir(directory)
images = glob('*.%s' % container)
batch_dir = os.path.basename(os.path.dirname(os.path.dirname(root_dir)))
output_parent_directory = os.path.join(args.o, batch_dir)
if not os.path.isdir(output_parent_directory):
os.makedirs(output_parent_directory)
numberless_filename = images[0].split("_")[0:-1]
    # rebuild the filename stem with a trailing underscore for each component
    ffmpeg_friendly_name = ''.join(part + '_' for part in numberless_filename)
output_dirname = output_parent_directory + '/' + ffmpeg_friendly_name + 'dpx_transcodes'
try:
os.makedirs(output_dirname + '/image')
os.makedirs(output_dirname + '/image/logs')
os.makedirs(output_dirname + '/image/md5')
os.makedirs(output_dirname + '/image/dpx_files')
os.makedirs(output_dirname + '/image/xml_files')
    except OSError:
        pass  # output directories may already exist
output = output_dirname + '/image/md5/%s%s.framemd5' % (ffmpeg_friendly_name,container)
logfile = output_dirname + '/image/logs/%s%s.log' % (ffmpeg_friendly_name, log_filename_alteration)
env_dict = set_environment(logfile)
image_seq_without_container = ffmpeg_friendly_name
ffmpeg_friendly_name += "%06d." + '%s' % container
framemd5 = ['ffmpeg','-report','-f','image2', '-i', ffmpeg_friendly_name,'-f','framemd5',output]
print framemd5
subprocess.call(framemd5, env=env_dict)
info = [output_dirname, output, image_seq_without_container, output_parent_directory]
return info
def file_check(dir2check):
os.chdir(dir2check)
tiff_check = glob('*.tiff')
dpx_check = glob('*.dpx')
if len(dpx_check) > 0:
print 'DPX sequence, not TIFF. Not processing'
return 'DPX'
elif len(tiff_check) > 0:
return 'TIFF'
else:
print 'no images found'
return 'none'
def remove_bad_files(root_dir):
rm_these = ['.DS_Store', 'Thumbs.db', 'desktop.ini']
for root, dirs, files in os.walk(root_dir):
for name in files:
path = os.path.join(root, name)
for i in rm_these:
if name == i:
print '***********************' + 'removing: ' + path
os.remove(path)
def premis_log(source_parent_dir, source_directory):
split_list = os.path.basename(os.path.dirname(source_parent_dir)).split('_')
premisxml, premis_namespace, doc, premis = setup_xml(source_directory)
items = {"workflow":"scanning","oe":split_list[0], "filmographic":split_list[1], "sourceAccession":split_list[2], "interventions":['placeholder'], "prepList":['placeholder'], "user":user}
premis = doc.getroot()
framemd5_uuid = str(uuid.uuid4())
final_sip_manifest_uuid = str(uuid.uuid4())
a = doc.xpath('//ns:agentIdentifierValue',namespaces={'ns': premis_namespace})
for i in a:
if i.text == '9430725d-7523-4071-9063-e8a6ac4f84c4':
linkingEventIdentifier = create_unit(-1,i.getparent().getparent(),'linkingEventIdentifier')
linkingEventIdentifierType = create_unit(1,linkingEventIdentifier, 'linkingEventIdentifierType')
linkingEventIdentifierValue = create_unit(1,linkingEventIdentifier, 'linkingEventIdentifierValue')
linkingEventIdentifierValue.text = final_sip_manifest_uuid
linkingEventIdentifierType.text = 'UUID'
elif i.text == 'ee83e19e-cdb1-4d83-91fb-7faf7eff738e':
linkingEventIdentifier = create_unit(-1,i.getparent().getparent(),'linkingEventIdentifier')
linkingEventIdentifierType = create_unit(1,linkingEventIdentifier, 'linkingEventIdentifierType')
linkingEventIdentifierValue = create_unit(1,linkingEventIdentifier, 'linkingEventIdentifierValue')
linkingEventIdentifierValue.text = framemd5_uuid
linkingEventIdentifierType.text = 'UUID'
representation_uuid = doc.findall('//ns:objectIdentifierValue',namespaces={'ns': premis_namespace})[0].text
#ffmpegAgent = make_agent(premis,[framemd5_uuid ], 'ee83e19e-cdb1-4d83-91fb-7faf7eff738e')
make_event(premis, 'message digest calculation', 'Checksum manifest for whole package created', [['UUID','9430725d-7523-4071-9063-e8a6ac4f84c4' ]],final_sip_manifest_uuid,[representation_uuid], 'source', 'now')
make_event(premis, 'message digest calculation', 'Frame level checksums of images', [['UUID','ee83e19e-cdb1-4d83-91fb-7faf7eff738e' ]], framemd5_uuid, [representation_uuid], 'source', 'now' )
write_premis(doc, premisxml)
parser = argparse.ArgumentParser(description='DPX2TIFF specific workflow for IFI'
' Written by Kieran O\'Leary.')
parser.add_argument(
'input', nargs='+',
help='full path of input directory'
)
parser.add_argument(
'-o',
help='full path of output directory', required=True)
args = parser.parse_args()
print args
desktop_logs_dir = make_desktop_logs_dir()
csv_report_filename = os.path.join(desktop_logs_dir, 'dpx_transcode_report' + time.strftime("_%Y_%m_%dT%H_%M_%S") + '.csv')
#permission for correct directories sought from user
permission = ''
all_files = args.input
if permission not in ('y', 'Y'):
print '\n\n**** All TIFF sequences within these directories will be converted to DPX.\n'
for i in all_files:
print i
    permission = raw_input('\n**** These are the directories that will be turned into DPX. \n**** If this looks ok, please press Y, otherwise, type N\n' )
    while permission not in ('Y','y','N','n'):
        permission = raw_input('\n**** These are the directories that will be turned into DPX. \n**** If this looks ok, please press Y, otherwise, type N\n')
if permission == 'n' or permission == 'N':
print 'Exiting at your command- Cheerio for now'
sys.exit()
elif permission =='y' or permission == 'Y':
print 'Ok so!'
#user identity sought for accurate premis documentation
user = ''
if user not in ('1', '2', '3'):
user = raw_input('\n\n**** Who are you?\nPress 1 or 2 or 3\n\n1. Brian Cash\n2. Gavin Martin\n3. Raelene Casey\n' )
while user not in ('1','2','3'):
user = raw_input('\n\n**** Who are you?\nPress 1 or 2 or 3\n\n1. Brian Cash\n2. Gavin Martin\n3. Raelene Casey\n')
if user == '1':
user = 'Brian Cash'
print 'Hi Brian, Congratulations on becoming a father!!!'
elif user == '2':
user = 'Gavin Martin'
print 'Hi Gavin, Have you renewed your subscription to American Cinematographer?'
elif user == '3':
user = 'Raelene Casey'
print 'Hi Raelene, Brian must be out of the office'
time.sleep(1)
create_csv(csv_report_filename, ('Sequence Name', 'Lossless?', 'Start time', 'Finish Time'))
for source_directory in all_files:
for root,dirnames,filenames in os.walk(source_directory):
source_directory = root
if not file_check(source_directory) == 'TIFF':
append_csv(csv_report_filename, (source_directory,'EMPTY DIRECTORY - SKIPPED', 'n/a', 'n/a'))
continue
root_dir = os.path.dirname(os.path.dirname(root))
general_log = root_dir + '/logs/image/%s_image_log.log' % os.path.basename(root_dir)
generate_log(general_log, 'Input = %s' % root)
remove_bad_files(source_directory)
source_parent_dir = os.path.dirname(source_directory)
normpath = os.path.normpath(source_directory)
relative_path = normpath.split(os.sep)[-1]
split_path = os.path.split(os.path.basename(source_directory))[1]
start = datetime.datetime.now()
source_manifest = root_dir + '/%s_manifest.md5' % relative_path
generate_log(general_log, 'Generating source manifest via md5deep and storing as %s' % source_manifest)
make_manifest(root_dir, root_dir, source_manifest)
info = make_framemd5(source_directory, 'tiff', 'tiff_framemd5')
output_dirname = info[0]
source_textfile = info[1]
fmd5copy = root_dir + '/metadata/image'
shutil.copy(source_textfile,fmd5copy )
image_seq_without_container = info[2]
output_parent_directory = info[3]
tiff_filename = image_seq_without_container + "%06d.tiff"
dpx_filename = image_seq_without_container + "%06d.dpx"
logfile = output_dirname + '/image/logs/%sdpx_transcode.log' % image_seq_without_container
env_dict = set_environment(logfile)
generate_log(general_log, 'Starting TIFF to DPX transcode')
tiff2dpx = ['ffmpegnometadata','-report','-f','image2','-framerate','24', '-i', tiff_filename ,output_dirname + '/image/dpx_files' '/' + dpx_filename]
print tiff2dpx
subprocess.call(tiff2dpx,env=env_dict)
generate_log(general_log, 'TIFF to DPX transcode complete')
parent_basename = os.path.basename(output_dirname)
manifest_textfile = os.path.dirname(output_dirname) + '/' + parent_basename + '_manifest.md5'
generate_log(general_log, 'Generating destination manifest via md5deep and storing as %s' % manifest_textfile)
other = make_framemd5(output_dirname + '/image/dpx_files', 'dpx', 'dpx_framemd5')
other_textfile = other[1]
judgement = diff_textfiles(source_textfile, other_textfile)
generate_log(general_log, 'Outcome of transcode was: %s' % judgement)
make_manifest(output_parent_directory, os.path.basename(output_dirname), manifest_textfile)
source_metadata_dir = root_dir + '/metadata/image'
shutil.copy(source_textfile, source_metadata_dir + '/%s' % os.path.basename(source_textfile))
finish = datetime.datetime.now()
        # Begin PREMIS metadata logging.
premis_log(source_parent_dir, source_directory)
append_csv(csv_report_filename, (parent_basename,judgement, start, finish))
|
kieranjol/IFIscripts
|
legacy_scripts/makedpx.py
|
Python
|
mit
| 11,528
|
[
"Brian"
] |
c8b942caa0fe34e01c26b443fe8dda61c1fc7fd5dffec9c05a1c82f2a6ccb3dc
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage. If not, see <http://www.gnu.org/licenses/>.
import django.db.transaction
import tldap.transaction
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Regular cleanup of application db models."
@django.db.transaction.atomic
@tldap.transaction.commit_on_success
def handle(self, **options):
import datetime
from django.db.models import Count
from ...models import Applicant, Application
now = datetime.datetime.now()
verbose = int(options.get('verbosity'))
# Delete all expired unsubmitted applications
for application in Application.objects.filter(
expires__lte=now, submitted_date__isnull=True):
if verbose >= 1:
print(
"Deleted expired unsubmitted application #%s"
% application.id)
application.delete()
month_ago = now - datetime.timedelta(days=30)
# Delete all unsubmitted applications that have been around for 1 month
for application in Application.objects.filter(
created_date__lte=month_ago, submitted_date__isnull=True):
if verbose >= 1:
print("Deleted unsubmitted application #%s" % application.id)
application.delete()
# Delete all applications that have been complete/declined for 1 month
for application in Application.objects.filter(
complete_date__isnull=False, complete_date__lte=month_ago):
if verbose >= 1:
print("Deleted completed application #%s" % application.id)
application.delete()
# Delete all orphaned applicants
for applicant in Applicant.objects.annotate(
cc=Count('application')).filter(cc=0):
if verbose >= 1:
print("Deleted orphaned applicant #%s" % applicant.id)
applicant.delete()
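# Usage sketch (not part of the module): as a Django management command this
# is invoked by module name, typically from cron, e.g.
#
#     ./manage.py application_cleanup --verbosity=1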
|
Karaage-Cluster/karaage
|
karaage/plugins/kgapplications/management/commands/application_cleanup.py
|
Python
|
gpl-3.0
| 2,647
|
[
"Brian"
] |
b04ee97a87dbc50d1656f0f46e2de26cdf9e7adee99782f4cfdc253ba6b40054
|
"""Schemas to valid user input.
Index
-----
.. currentmodule:: nanoqm.workflows.schemas
.. autosummary::
{autosummary}
API
---
{autodata}
"""
__all__ = [
'schema_cp2k_general_settings',
'schema_derivative_couplings',
'schema_single_points',
'schema_distribute_absorption_spectrum',
'schema_distribute_derivative_couplings',
'schema_distribute_single_points',
'schema_absorption_spectrum',
'schema_ipr',
'schema_coop']
import os
from numbers import Real
from typing import Any, Dict, Iterable
import pkg_resources as pkg
from schema import And, Optional, Or, Regex, Schema, Use
def equal_lambda(name: str) -> And:
"""Create an schema checking that the keyword matches the expected value."""
return And(
str, Use(str.lower), lambda s: s == name)
def any_lambda(array: Iterable[str]) -> And:
"""Create an schema checking that the keyword matches one of the expected values."""
return And(
str, Use(str.lower), lambda s: s in array)
def merge(d1: Dict[str, Any], d2: Dict[str, Any]) -> Dict[str, Any]:
"""Merge two dictionaries using without modifying the original."""
x = d1.copy()
x.update(d2)
return x
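# Illustrative sketch (not part of the original module): ``any_lambda``
# lower-cases its input before testing membership, so matching is
# case-insensitive, and ``merge`` leaves both of its inputs untouched:
#
#     >>> Schema(any_lambda(("first", "all"))).validate("First")
#     'first'
#     >>> merge({"a": 1}, {"b": 2})
#     {'a': 1, 'b': 2}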
#: Schema to validate the CP2K general settings
schema_cp2k_general_settings = Schema({
# "Basis set to carry out the quantum chemistry simulation"
"basis": str,
# "Pseudo-potential to carry out the quantum chemistry simulation"
"potential": str,
# Charge of the system
Optional("charge", default=0): int,
# Multiplicity
Optional("multiplicity", default=1): int,
    # Specify the Cartesian components of the cell vector. Units: Angstrom
Optional("cell_parameters", default=10): Or(
Real,
lambda xs: len(xs) == 3 and isinstance(xs, list),
lambda xs: len(xs) == 3 and all(len(r) == 3 for r in xs)),
# Type of periodicity
"periodic": any_lambda(("none", "x", "y", "z", "xy", "xy", "yz", "xyz")),
# Specify the angles between the vectors defining the unit cell
Optional("cell_angles"): list,
# Path to the folder containing the basis set specifications
Optional("path_basis", default=pkg.resource_filename("nanoqm", "basis")): os.path.isdir,
# Settings describing the input of the quantum package
"cp2k_settings_main": object,
# Settings describing the input of the quantum package
# to compute the guess wavefunction"
"cp2k_settings_guess": object,
# Restart File Name
Optional("wfn_restart_file_name", default=None): Or(str, None),
# File containing the Parameters of the cell if those
# parameters change during the MD simulation.
Optional("file_cell_parameters", default=None): Or(str, None),
    # Quality of the auxiliary basis cFIT
Optional("aux_fit", default="verygood"):
any_lambda(("low", "medium", "good", "verygood", "excellent")),
# executable name
# "sdbg" Serial single core testing and debugging
# "sopt" Serial general single core usage
# "ssmp" Parallel (only OpenMP), single node, multi core
# "pdbg" Parallel (only MPI) multi-node testing and debugging
# "popt" Parallel (only MPI) general usage, no threads
# "psmp" parallel (MPI + OpenMP) general usage, threading might improve scalability and memory usage
Optional("executable", default="cp2k.psmp"):
Regex(r'.*cp2k\.(?:popt|psmp|sdbg|sopt|ssmp|pdbg)', flags=2) # flag 2 == IGNORECASE
})
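# Illustrative validation sketch (hypothetical values; not part of the
# original module). The required keys must be supplied, while Optional
# entries such as charge=0 and multiplicity=1 are filled in as defaults:
#
#     settings = schema_cp2k_general_settings.validate({
#         "basis": "DZVP-MOLOPT-SR-GTH",
#         "potential": "GTH-PBE",
#         "periodic": "xyz",
#         "cp2k_settings_main": {},
#         "cp2k_settings_guess": {},
#     })
#     settings["charge"]  # -> 0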
#: Dictionary with the options common to all workflows
dict_general_options = {
# Number of occupied/virtual orbitals to use
Optional('active_space', default=[10, 10]): And(list, lambda xs: len(xs) == 2),
# Index of the HOMO
Optional("nHOMO"): int,
# Index of the orbitals to compute the couplings
Optional("mo_index_range"): tuple,
# "default quantum package used"
Optional("package_name", default="cp2k"): str,
# project
Optional("project_name", default="namd"): str,
# Working directory
Optional("scratch_path", default=None): Or(None, str),
# path to the HDF5 to store the results
Optional("path_hdf5", default="quantum.hdf5"): str,
# path to xyz trajectory of the Molecular dynamics
"path_traj_xyz": os.path.exists,
    # Index from which to start enumerating the folders created for each
    # point in the MD
Optional("enumerate_from", default=0): int,
    # Ignore warnings issued by the quantum package and keep computing
Optional("ignore_warnings", default=False): bool,
# Calculate the guess wave function in either the first point of the
# trajectory or in all
Optional("calculate_guesses", default="first"):
any_lambda(("first", "all")),
# Units of the molecular geometry on the MD file
Optional("geometry_units", default="angstrom"):
any_lambda(("angstrom", "au")),
# Integration time step used for the MD (femtoseconds)
Optional("dt", default=1): Real,
# Deactivate the computation of the orbitals for debugging purposes
Optional("compute_orbitals", default=True): bool,
# Flag to remove the log containing the orbitals for debugging purposes
Optional("remove_log_file", default=False): bool,
# General settings
"cp2k_general_settings": schema_cp2k_general_settings,
# Empty string for restricted calculation or either alpha/beta
# for unrestricted calculation
Optional("orbitals_type", default=""): any_lambda(("", "alphas", "betas", "both"))
}
#: Dict with input options to run a derivative coupling workflow
dict_derivative_couplings = {
# Name of the workflow to run
"workflow": equal_lambda("derivative_couplings"),
# Algorithm used to compute the derivative couplings
Optional("algorithm", default="levine"):
any_lambda(("levine", "3points")),
# Use MPI to compute the couplings
Optional("mpi", default=False): bool,
# Track the crossing between states
Optional("tracking", default=True): bool,
# Write the overlaps in ascii
Optional("write_overlaps", default=False): bool,
    # Compute the overlap between molecular geometries using dephasing
Optional("overlaps_deph", default=False): bool
}
dict_merged_derivative_couplings = merge(
dict_general_options, dict_derivative_couplings)
#: Schema to validate the input for a derivative coupling calculation
schema_derivative_couplings = Schema(
dict_merged_derivative_couplings)
#: Schema to validate the input for a job scheduler
schema_job_scheduler = Schema({
Optional("scheduler", default="slurm"):
any_lambda(("slurm", "pbs")),
Optional("nodes", default=1): int,
Optional("tasks", default=1): int,
Optional("wall_time", default="01:00:00"): str,
Optional("job_name", default="namd"): str,
Optional("queue_name", default="short"): str,
Optional("load_modules", default=""): str,
Optional("free_format", default=""): str
})
#: Input options to distribute a job
dict_distribute = {
Optional("workdir", default=os.getcwd()): str,
# Number of chunks to split the trajectory
"blocks": int,
# Resource manager configuration
"job_scheduler": schema_job_scheduler,
# General settings
"cp2k_general_settings": schema_cp2k_general_settings,
}
#: input to distribute a derivative coupling job
dict_distribute_derivative_couplings = {
# Name of the workflow to run
"workflow": equal_lambda("distribute_derivative_couplings")
}
#: Schema to validate the input to distribute a derivative coupling calculation
schema_distribute_derivative_couplings = Schema(
merge(
dict_distribute,
merge(
dict_merged_derivative_couplings,
dict_distribute_derivative_couplings)))
#: Input for an absorption spectrum calculation
dict_absorption_spectrum = {
# Name of the workflow to run
"workflow": equal_lambda("absorption_spectrum"),
    # Type of TDDFT calculation. Available: sing_orb, stda, stddft
    Optional("tddft", default="stda"): And(
        str, Use(str.lower), lambda s: s in ("sing_orb", "stda", "stddft")),
    # Interval between MD points where the oscillators are computed
Optional("stride", default=1): int,
    # Exchange-correlation functional used in the DFT calculations
Optional("xc_dft", default="pbe"): str
}
dict_merged_absorption_spectrum = merge(
dict_general_options, dict_absorption_spectrum)
#: Schema to validate the input for an absorption spectrum calculation
schema_absorption_spectrum = Schema(dict_merged_absorption_spectrum)
dict_distribute_absorption_spectrum = {
# Name of the workflow to run
"workflow": equal_lambda("distribute_absorption_spectrum")
}
schema_distribute_absorption_spectrum = Schema(
merge(dict_distribute, merge(
dict_merged_absorption_spectrum, dict_distribute_absorption_spectrum)))
dict_single_points = {
# Name of the workflow to run
"workflow": any_lambda(("single_points", "ipr_calculation", "coop_calculation")),
# General settings
"cp2k_general_settings": schema_cp2k_general_settings
}
#: input to distribute single point calculations
dict_distribute_single_points = {
# Name of the workflow to run
"workflow": equal_lambda("distribute_single_points")
}
#: Input for a Crystal Orbital Overlap Population calculation
dict_coop = {
# List of the two elements to calculate the COOP for
"coop_elements": list}
dict_merged_single_points = merge(dict_general_options, dict_single_points)
#: Schema to validate the input of a single point calculation
schema_single_points = Schema(dict_merged_single_points)
#: Schema to validate the input for an Inverse Participation Ratio calculation
schema_ipr = schema_single_points
#: Merged input options for a Crystal Orbital Overlap Population calculation
dict_merged_coop = merge(dict_merged_single_points, dict_coop)
#: Schema to validate the input for a Crystal Orbital Overlap Population calculation
schema_coop = Schema(dict_merged_coop)
#: Schema to validate the input to distribute a single point calculation
schema_distribute_single_points = Schema(
merge(dict_distribute, merge(
dict_merged_single_points, dict_distribute_single_points)))
|
SCM-NV/qmworks-namd
|
nanoqm/workflows/schemas.py
|
Python
|
mit
| 10,214
|
[
"CP2K",
"CRYSTAL",
"NAMD"
] |
b8693fe24f477a4256135f83f898c8f0e88a4bf740c930a7fb20cca731ebaa48
|
'''
Code to calculate clusters using a Dirichlet Process
Gaussian mixture model.
Requires scikit-learn:
http://scikit-learn.org/stable/
'''
import numpy
from sklearn import mixture
FILENAME = "mcdonalds-normalized-data.tsv"
# Note: you'll have to remove the last "name" column in the file (or
# some other such thing), so that all the columns are numeric.
x = numpy.loadtxt(open(FILENAME, "rb"), delimiter = "\t", skiprows = 1)
dpgmm = mixture.DPGMM(n_components = 25)
dpgmm.fit(x)
clusters = dpgmm.predict(x)
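# Editorial note (sketch, not part of the original script): mixture.DPGMM was
# deprecated in scikit-learn 0.18 and later removed. On current scikit-learn
# the equivalent Dirichlet-process model is BayesianGaussianMixture:
#
#     from sklearn.mixture import BayesianGaussianMixture
#     dpgmm = BayesianGaussianMixture(
#         n_components=25, weight_concentration_prior_type="dirichlet_process")
#     clusters = dpgmm.fit_predict(x)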
|
bharcode/Kaggle
|
DirichletProcesses/dpgmm.py
|
Python
|
gpl-2.0
| 516
|
[
"Gaussian"
] |
44654a190a39d88407716c343334e764edb88ae0aef74256f2dd163efc4ac3d0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import re
import uuid
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova.objects import base as obj_base
from nova.objects import migration as migration_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import test
from nova.tests.objects import test_migration
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_LOCAL_GB = 6
FAKE_VIRT_VCPUS = 1
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
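        # (the resource tracker treats an empty dict as "no support" and
        # disables itself; see UnsupportedDriverTestCase below)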
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps([{
'label': 'forza-napoli',
'dev_type': 'foo',
'compute_node_id': 1,
'address': '0000:00:00.1',
'product_id': 'p1',
'vendor_id': 'v1',
'status': 'available',
'extra_k1': 'v1'}])
return d
def estimate_instance_overhead(self, instance_info):
mem = instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(self.conductor.db,
'instance_get_all_by_host_and_node',
self._fake_instance_get_all_by_host_and_node)
self.stubs.Set(self.conductor.db,
'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
def _create_compute_node(self, values=None):
compute = {
"id": 1,
"service_id": 1,
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"stats": [{"key": "num_instances", "value": "1"}],
"hypervisor_hostname": "fakenode",
}
if values:
compute.update(values)
return compute
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
}
return service
def _fake_instance_system_metadata(self, instance_type, prefix=''):
sys_meta = []
for key in flavors.system_metadata_flavor_props.keys():
sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
'value': instance_type[key]})
return sys_meta
def _fake_instance(self, stash=True, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
itype = self._fake_flavor_create()
sys_meta = self._fake_instance_system_metadata(itype)
if stash:
# stash instance types in system metadata.
sys_meta = (sys_meta +
self._fake_instance_system_metadata(itype, 'new_') +
self._fake_instance_system_metadata(itype, 'old_'))
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
'vm_state': vm_states.RESIZED,
'task_state': None,
'memory_mb': 2,
'root_gb': 3,
'ephemeral_gb': 1,
'os_type': 'Linux',
'project_id': '123456',
'vcpus': 1,
'host': None,
'node': None,
'instance_type_id': 1,
'launched_on': None,
'system_metadata': sys_meta,
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
}
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': FAKE_VIRT_LOCAL_GB / 2,
'ephemeral_gb': FAKE_VIRT_LOCAL_GB / 2,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor'
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_all_by_host_and_node(self, context, host, nodename):
return [i for i in self._instances.values() if i['host'] == host]
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_instance_update_and_get_original(self, context, instance_uuid,
values):
instance = self._instances[instance_uuid]
instance.update(values)
# the test doesn't care what the original instance values are, it's
# only used in the subsequent notification:
return (instance, instance)
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_context_claim(self):
# instance context manager variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node()
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def test_create_compute_node(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.updated = False
self.deleted = False
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
self.tracker.update_available_resource(self.context)
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.updated = True
values['stats'] = [{"key": "num_instances", "value": "1"}]
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = self._migrations.values()[0]
migration.update(values)
return migration
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_MB +
FAKE_VIRT_MEMORY_OVERHEAD, disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus
}
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self._assert('{}', 'pci_stats')
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
expected = """[{"count": 1,
"vendor_id": "v1",
"product_id": "p1",
"extra_info": {"extra_k1": "v1"}}]"""
expected = re.sub(r'\s+', '', expected)
pci = re.sub(r'\s+', '', self.tracker.compute_node['pci_stats'])
self.assertEqual(expected, pci)
def _driver(self):
return FakeVirtDriver(pci_support=True)
class InstanceClaimTestCase(BaseTrackerTestCase):
def test_update_usage_only_for_tracked(self):
instance = self._fake_instance(memory_mb=3, root_gb=1, ephemeral_gb=1,
task_state=None)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(3 + FAKE_VIRT_MEMORY_OVERHEAD, 'memory_mb_used')
self._assert(2, 'local_gb_used')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(3 + FAKE_VIRT_MEMORY_OVERHEAD, 'memory_mb_used')
self._assert(2, 'local_gb_used')
self._assert(1, 'current_workload')
def test_claim_and_audit(self):
claim_mem = 3
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
ephemeral_gb=0)
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertEqual(5, self.compute["memory_mb"])
self.assertEqual(claim_mem + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute["memory_mb_used"])
self.assertEqual(5 - claim_mem - FAKE_VIRT_MEMORY_OVERHEAD,
self.compute["free_ram_mb"])
self.assertEqual(6, self.compute["local_gb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(6 - claim_disk, self.compute["free_disk_gb"])
# 1st pretend that the compute operation finished and claimed the
# desired resources from the virt layer
driver = self.tracker.driver
driver.memory_mb_used = claim_mem
driver.local_gb_used = claim_disk
self.tracker.update_available_resource(self.context)
# confirm tracker is adding in host_ip
self.assertIsNotNone(self.compute.get('host_ip'))
# confirm that resource usage is derived from instance usages,
# not virt layer:
self.assertEqual(claim_mem + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqual(5 - claim_mem - FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['free_ram_mb'])
self.assertEqual(claim_disk, self.compute['local_gb_used'])
self.assertEqual(6 - claim_disk, self.compute['free_disk_gb'])
def test_claim_and_abort(self):
claim_mem = 3
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0)
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute["memory_mb_used"])
self.assertEqual(5 - claim_mem - FAKE_VIRT_MEMORY_OVERHEAD,
self.compute["free_ram_mb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(6 - claim_disk, self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(5, self.compute["free_ram_mb"])
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(6, self.compute["free_disk_gb"])
def test_instance_claim_with_oversubscription(self):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus}
instance = self._fake_instance(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb)
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(root_gb * 2,
self.tracker.compute_node['local_gb_used'])
def test_additive_claims(self):
self.limits['vcpu'] = 2
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1,
vcpus=1)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1,
vcpus=1)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 + 2 * FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(4, self.tracker.compute_node['local_gb_used'])
self.assertEqual(2, self.tracker.compute_node['vcpus_used'])
def test_context_claim_with_exception(self):
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
def test_instance_context_claim(self):
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(1 + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
self.assertEqual(1 + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqual(2, self.compute['local_gb_used'])
# after exiting claim context, build is marked as finished. usage
# totals should be same:
self.tracker.update_available_resource(self.context)
self.assertEqual(1 + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
self.assertEqual(1 + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqual(2, self.compute['local_gb_used'])
def test_update_load_stats_for_instance(self):
instance = self._fake_instance(task_state=task_states.SCHEDULING)
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node['current_workload'])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
def test_cpu_stats(self):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
instance = self._fake_instance(vcpus=1)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(1, self.tracker.compute_node['vcpus_used'])
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(1, self.tracker.compute_node['vcpus_used'])
instance = self._fake_instance(vcpus=10)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(11, self.tracker.compute_node['vcpus_used'])
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
self.assertEqual(1, self.tracker.compute_node['vcpus_used'])
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
class ResizeClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
def _fake_migration_create(mig_self, ctxt):
self._migrations[mig_self.instance_uuid] = mig_self
mig_self.obj_reset_changes()
self.stubs.Set(migration_obj.Migration, 'create',
_fake_migration_create)
self.instance = self._fake_instance()
self.instance_type = self._fake_flavor_create()
def _fake_migration_create(self, context, values=None):
instance_uuid = str(uuid.uuid1())
mig_dict = test_migration.fake_db_migration()
mig_dict.update({
'id': 1,
'source_compute': 'host1',
'source_node': 'fakenode',
'dest_compute': 'host2',
'dest_node': 'fakenode',
'dest_host': '127.0.0.1',
'old_instance_type_id': 1,
'new_instance_type_id': 2,
'instance_uuid': instance_uuid,
'status': 'pre-migrating',
'updated_at': timeutils.utcnow()
})
if values:
mig_dict.update(values)
migration = migration_obj.Migration()
migration.update(mig_dict)
# This hits the stub in setUp()
migration.create('fake')
def test_claim(self):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD,
'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
def test_abort(self):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
def test_additive_claims(self):
limits = self._limits(FAKE_VIRT_MEMORY_MB * 2 +
FAKE_VIRT_MEMORY_OVERHEAD * 2,
FAKE_VIRT_LOCAL_GB * 2,
FAKE_VIRT_VCPUS * 2)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_MB + 2 * FAKE_VIRT_MEMORY_OVERHEAD,
'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
def test_claim_and_audit(self):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD,
'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
def test_same_host(self):
self.limits['vcpu'] = 3
src_type = self._fake_flavor_create(id=2, memory_mb=1,
root_gb=1, ephemeral_gb=0, vcpus=1)
dest_type = self._fake_flavor_create(id=2, memory_mb=2,
root_gb=2, ephemeral_gb=1, vcpus=2)
# make an instance of src_type:
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=0,
vcpus=1, instance_type_id=2)
instance['system_metadata'] = self._fake_instance_system_metadata(
dest_type)
self.tracker.instance_claim(self.context, instance, self.limits)
# resize to dest_type:
claim = self.tracker.resize_claim(self.context, instance,
dest_type, self.limits)
self._assert(3 + FAKE_VIRT_MEMORY_OVERHEAD * 2, 'memory_mb_used')
self._assert(4, 'local_gb_used')
self._assert(3, 'vcpus_used')
self.tracker.update_available_resource(self.context)
claim.abort()
# only the original instance should remain, not the migration:
self._assert(1 + FAKE_VIRT_MEMORY_OVERHEAD, 'memory_mb_used')
self._assert(1, 'local_gb_used')
self._assert(1, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
def test_revert(self):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self.tracker.drop_resize_claim(self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
def test_revert_reserve_source(self):
# if a revert has started at the API and audit runs on
# the source compute before the instance flips back to source,
# resources should still be held at the source based on the
# migration:
dest = "desthost"
dest_tracker = self._tracker(host=dest)
dest_tracker.update_available_resource(self.context)
self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)
values = {'source_compute': self.host, 'dest_compute': dest,
'old_instance_type_id': 1, 'new_instance_type_id': 1,
'status': 'post-migrating',
'instance_uuid': self.instance['uuid']}
migration = self._fake_migration_create(self.context, values)
# attach an instance to the destination host tracker:
dest_tracker.instance_claim(self.context, self.instance)
self._assert(FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD,
'memory_mb_used', tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# audit and recheck to confirm migration doesn't get double counted
# on dest:
dest_tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD,
'memory_mb_used', tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# apply the migration to the source host tracker:
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD,
'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
# flag the instance and migration as reverting and re-audit:
self.instance['vm_state'] = vm_states.RESIZED
self.instance['task_state'] = task_states.RESIZE_REVERTING
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB + 1, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance(vm_state=vm_states.ACTIVE,
task_state=task_states.RESIZE_MIGRATING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
def test_dupe_filter(self):
self._fake_flavor_create(id=2, memory_mb=1, root_gb=1,
ephemeral_gb=1, vcpus=1)
instance = self._fake_instance(host=self.host)
values = {'source_compute': self.host, 'dest_compute': self.host,
'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
self._fake_migration_create(self.context, values)
self._fake_migration_create(self.context, values)
self.tracker.update_available_resource(self.context)
self.assertEqual(1, len(self.tracker.tracked_migrations))
def test_set_instance_host_and_node(self):
instance = self._fake_instance()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
"""Make sure we handle the case where the following are true:
1) Compute node C gets upgraded to code that looks for instance types in
system metadata. AND
2) C already has instances in the process of migrating that do not have
stashed instance types.
bug 1164110
"""
def setUp(self):
super(NoInstanceTypesInSysMetadata, self).setUp()
self.instance = self._fake_instance(stash=False)
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': 4, 'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': 4, 'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
# 2 instances, 4 mb each, plus overhead
self.assertEqual(8 + 2 * FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
|
imsplitbit/nova
|
nova/tests/compute/test_resource_tracker.py
|
Python
|
apache-2.0
| 38,692
|
[
"exciting"
] |
aa10a30629b986c9bc3d1de3cb5204683b0869d84c845ebea532a48d2406c417
|
#!/usr/bin/python
# file: formatter.py
# author: Andrea Vedaldi
# description: Utility to format MATLAB comments.
# Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson.
# All rights reserved.
#
# This file is part of the VLFeat library and is made available under
# the terms of the BSD license (see the COPYING file).
"""
MDOC formats the help block of a MATLAB M-file based on a simple set
of rules. Paragraphs, verbatim sections, lists and other structures
are automatically instantiated by looking at blank lines, indentation
and a few decoration symbols.
The documentation starts at a conventional indentation level N (by
default 2). A block of non-empty lines prefixed by N characters is
considered a paragraph. For instance
| Bla bla bla
| bla bla bla.
|
| Bla bla.
generates two paragraphs. If there are more than N white spaces,
then the block is taken verbatim instead (and rendered in <pre> HTML
tags). For instance
| Bla bla bla
| Code Code Code
|
| Code Code Code
generates one paragraph followed by one verbatim section.
"""
import xml.dom.minidom
import sys
import os
import re
__mpname__ = 'MDocFormatter'
__version__ = '0.1'
__date__ = '2008-01-01'
__description__ = 'MDoc formatting module'
__long_description__ = __doc__
__license__ = 'BSD'
__author__ = 'Andrea Vedaldi'
# terminal
class Terminal:
def isa(self, classinfo):
return isinstance(self, classinfo)
# empty terminal
class E (Terminal):
pass
# blank line
class B (Terminal):
content = ""
# non-blank line
class L (Terminal):
indent = 0
# regular line
class PL (L):
pass
# line with bullet
class BL (L):
bullet = None
inner_indent = 0
# line with description
class DL (L):
pass
# --------------------------------------------------------------------
def lex(line):
# --------------------------------------------------------------------
"""
Parse the string LINE to a terminal symbol. Each line corresponds
    to exactly one terminal type. Terminal types are the leaves of a
hierarchy of types.
"""
# a blank line
match = re.match(r"\s*\n?$", line) ;
if match: return B()
# a line of the type ' content::inner_content'
match = re.match(r"(\s*)(.*)::(.*)\n?$", line)
if match:
x = DL()
x.indent = len(match.group(1))
x.content = match.group(2)
x.inner_content = match.group(3)
return x
    # a line of the type ' - inner_content'
match = re.match(r"(\s*)([-\*#]\s*)(\S.*)\n?$", line)
if match:
x = BL()
x.indent = len(match.group(1))
x.inner_content = match.group(3)
x.bullet = match.group(2)
x.inner_indent = x.indent + len(x.bullet)
x.content = x.bullet + x.inner_content
return x
# a line of the type ' content'
match = re.match(r"(\s*)(\S.*)\n?$", line)
if match:
x = PL()
x.indent = len(match.group(1))
x.content = match.group(2)
return x
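# Illustrative examples of how lex() classifies lines (not in the original):
#
#     lex("\n")             # -> B   (blank line)
#     lex("  Options::\n")  # -> DL  (description line, content 'Options')
#     lex("  - item\n")     # -> BL  (bullet line, inner_content 'item')
#     lex("  plain\n")      # -> PL  (regular line, indent 2)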
# --------------------------------------------------------------------
class Lexer(object):
# --------------------------------------------------------------------
"""
l = Lexer(LINES) parses the array of strings LINES. Lexer has a
head pointing to the current line. The head can be controlled by
the following methods:
l.next() advances the head and fetches the next terminal.
l.back() moves back the head.
l.getpos() returns the head position.
l.seek(POS) sets the head position to POS.
"""
def __init__(self, lines):
self.tokens = []
self.pos = -1
for line in lines:
self.tokens.append(lex(line))
def next(self):
self.pos = self.pos + 1
if self.pos >= len(self.tokens):
return E()
else:
            return self.tokens[self.pos]
def seek(self, pos):
self.pos = pos
def back(self):
if self.pos >=0: self.pos -= 1
    def rewrite(self, token):
        self.tokens[self.pos] = token
def getpos(self):
return self.pos
    def __str__(self):
        out = ""
        for i, t in enumerate(self.tokens):
            out += "%5d) %s %s\n" % (i, t.__class__.__name__, t.content)
        return out
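# A short usage sketch (illustrative only; not part of the original module):
#
#     lx = Lexer(["Para line\n", "\n", "  - item\n"])
#     t = lx.next()       # PL terminal for "Para line"
#     lx.back()           # move the head back one line
#     pos = lx.getpos()   # save the head position...
#     lx.seek(pos)        # ...and restore it later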
# --------------------------------------------------------------------
class Formatter:
# --------------------------------------------------------------------
"""
f = Formatter(LINES) parses the array of strings LINES.
f = Formatter(LINES, FUNCS) takes the dictionary of functions
FUNCS. Function names must be uppercase. The dictionary entries
are used to cross link functions in the generated documentation.
Formatter(LINES, FUNCS, LINKTYPE) produces links of the specified
type. Use 'a' for HTML anchors and 'wiki' for MediaWiki style
links.
    f.toDOM() processes the data to construct an XML (HTML)
    representation of it.
"""
def __init__ (self, lines, funcs={}, linktype='a'):
self.indentinit = 0
lineone = lines[0]
while lineone.startswith(' '):
lineone = lineone[1:]
self.indentinit += 1
self.tokens = Lexer(lines)
self.xmldoc = xml.dom.minidom.Document()
self.funcs = funcs
self.linktype = linktype
#print self.tokens
def toTextNode(self,s):
return self.xmldoc.createTextNode(unicode(s, 'iso-8859-1'))
def addAttr(self, tag, attr, val):
x = self.xmldoc.createAttribute(attr)
x.nodeValue = val
tag.setAttributeNode(x)
def addText(self, tag, s):
txt = self.toTextNode(s)
tag.appendChild(txt)
def addFancyText(self, tag, s):
"Adds text while transforming function references to links."
xs = []
last = -1
iter = re.finditer(r'(?:'
r'(?P<function>[A-Z][A-Z0-9_]*)'
r'\([^\)]*\)'
r')|(?:'
r'<a href="matlab:vl_help\(\''
r'(?P<page>[a-zA-Z0-9_]*)'
r'\'\)">'
r'(?P<text>[^<]*)'
r'</a>'
r')',s)
# r'(?P<page>[a-zA-Z0-9_]*)'
# r')', s)
# r')', s)
for i in iter:
func_name = i.group("function")
page_name = i.group("page")
if func_name and self.funcs.has_key(func_name.upper()):
# retrieve function HTML location
func_href = self.funcs[func_name.upper()]
# add text so far
xs.append(self.toTextNode(s[last+1:i.start()]))
if self.linktype == 'a':
# add link to function
atag = self.xmldoc.createElement(u"a")
self.addText(atag, i.group('function'))
atag.setAttribute(u"href", u"%s" % (func_href))
xs.append(atag)
elif self.linktype == 'wiki':
linktxt = "[[%s|%s]]" % (func_href, i.group('function'))
xs.append(self.toTextNode(linktxt))
# set head
last = i.start()+len(i.group(1))-1
elif page_name:
#print "page %s:" % page_name, i.group("text")
page_href = "%%dox:%s;" % page_name
# add text so far
xs.append(self.toTextNode(s[last+1:i.start()]))
if self.linktype == 'a':
# add link to function
atag = self.xmldoc.createElement(u"a")
self.addText(atag, i.group('text'))
atag.setAttribute(u"href", u"%s" % (page_href))
xs.append(atag)
elif self.linktype == 'wiki':
linktxt = "[[%s|%s]]" % (func_href, i.group('function'))
xs.append(self.toTextNode(linktxt))
# set head
last = i.end()-1
xs.append(self.toTextNode(s[last+1:]))
for x in xs:
tag.appendChild(x)
# ................................................................
# E, B, L, PL, BL, DL, ...
def parse_Terminal(self, T):
"If the next terminal on the stream is of type T, the terminal"
"is extracted and returned. Otherwise the function returns None"
pos = self.tokens.getpos()
t = self.tokens.next()
if t.isa(T):
return t
self.tokens.seek(pos)
return None
# ................................................................
# DIV(N) -> (B | P(N) | BL(N) | DL(N) | V(N))+
def parse_DIV(self, indent):
"Parse a DIV(N) symbol. A DIV(N) a sequence of blank"
"lines (B or other blocks at indentation level N, such as"
"pharagraphs P(N), bullet lists BL(N), description lists DN(N)"
pos = self.tokens.getpos()
xs = []
while True:
x = self.parse_Terminal(B)
if x: continue
x = self.parse_P(indent)
if x:
xs.append(x)
continue
x = self.parse_V(indent)
if x:
xs.append(x)
continue
x = self.parse_UL(indent)
if x:
xs.append(x)
continue
x = self.parse_DL(indent)
if x:
xs.append(x)
continue
break
if len(xs) == 0: return None
return xs
# ................................................................
# P(N) -> PL(N) L(N)*
def parse_P(self, indent):
content = "\n"
good = False
pos = self.tokens.getpos()
# Introduced by PL
x = self.parse_Terminal(PL)
if x:
if x.indent == indent:
content += x.content + "\n"
good = True
else:
self.tokens.back()
if not good:
return None
# Continued by zero or more L
while True:
x = self.parse_Terminal(L)
if x:
if x.indent == indent:
content += x.content + "\n"
good = True
continue
else:
self.tokens.back()
break
ptag = self.xmldoc.createElement("p")
self.addFancyText(ptag, content)
return ptag
# ................................................................
# V(N) -> L(M)+, M > N
def parse_V(self, indent):
content = "\n"
good = False
pos = self.tokens.getpos()
while True:
x = self.parse_Terminal(L)
if x:
if x.indent > indent:
content += " "*(x.indent - indent) + x.content + "\n"
good = True
continue
else:
self.tokens.back()
x = self.parse_Terminal(B)
if x:
content += "\n"
continue
break
if good:
ptag = self.xmldoc.createElement("pre")
# remove potential blank line at the end
if content[-2:] == "\n\n":
                content = content[:-1]
self.addText(ptag, content)
return ptag
self.tokens.seek(pos)
return None
# ................................................................
# UL(N) -> ULI(N)+
def parse_UL(self, indent):
xs = []
while True:
x = self.parse_ULI(indent)
if x:
xs.append(x)
continue
break
if len(xs) == 0: return None
ultag = self.xmldoc.createElement("ul")
for x in xs:
ultag.appendChild(x)
return ultag
# ................................................................
# ULI(N) -> UL(N,M) L(M)* DIV(M), M > N
def parse_ULI(self, indent):
content = "\n"
good = False
pos = self.tokens.getpos()
# Introduced by UL
x = self.parse_Terminal(BL)
if x:
if x.indent == indent:
content += x.inner_content + "\n"
indent = x.inner_indent
good = True
else:
self.tokens.back()
if not good:
return None
# Continued by zero or more L
while True:
x = self.parse_Terminal(L)
if x:
if x.indent == indent:
content += x.content + "\n"
good = True
continue
else:
self.tokens.back()
break
litag = self.xmldoc.createElement(u"li")
ptag = self.xmldoc.createElement(u"p")
self.addFancyText(ptag, content)
litag.appendChild(ptag)
# Continued by DIV
xs = self.parse_DIV(indent)
if xs:
for x in xs:
litag.appendChild(x)
return litag
# ................................................................
# DL(N) -> DI(N)+
def parse_DL(self, indent):
xs = []
while True:
x = self.parse_DI(indent)
if x:
xs += x
continue
break
if len(xs) == 0: return None
dltag = self.xmldoc.createElement(u"dl")
for x in xs:
dltag.appendChild(x)
return dltag
# ................................................................
# DI(N) -> DL(N) DIV(M)?, M > N
def parse_DI(self, indent):
content = "\n"
good = False
pos = self.tokens.getpos()
xs = []
# Introduced by DL
x = self.parse_Terminal(DL)
if x:
if x.indent == indent:
content += x.content + "\n"
good = True
else:
self.tokens.back()
if not good:
return None
if False:
# adds text after :: as part of the description dd
dttag = self.xmldoc.createElement(u"dt")
dttxt = self.toTextNode(content)
dttag.appendChild(dttxt)
xs.append(dttag)
# Inject inner_content
c = x.inner_content.strip()
if len(c) > 0:
tk = PL()
tk.content = x.inner_content
t = self.tokens.next()
self.tokens.back()
if t.isa(L) and t.indent > indent:
tk.indent = t.indent
else:
                    tk.indent = indent + 1
self.tokens.rewrite(tk)
self.tokens.back()
else:
# adds text after :: as part of the description term dt
dttag = self.xmldoc.createElement(u"dt")
dttxt = self.toTextNode(content)
dttag.appendChild(dttxt)
c = x.inner_content.strip()
if len(c) > 0:
deftag = self.xmldoc.createElement(u"span")
self.addAttr(deftag, "class", "defaults")
self.addText(deftag, c)
dttag.appendChild(deftag)
xs.append(dttag)
# Continued by DIV
t = self.tokens.next()
self.tokens.back()
if t.isa(L) and t.indent > indent:
xs_ = self.parse_DIV(t.indent)
            if xs_:
ddtag = self.xmldoc.createElement(u"dd")
for x in xs_:
ddtag.appendChild(x)
xs.append(ddtag)
return xs
# ................................................................
def toDOM(self):
# write <mfile></mfile>
xmf = self.xmldoc.createElement("div")
xmf.setAttribute(u"class", u"documentation")
self.xmldoc.appendChild(xmf)
# parse documentation
xs = self.parse_DIV(self.indentinit)
for x in xs: xmf.appendChild(x)
return self.xmldoc
if __name__ == '__main__':
text=""" Lorem Ipsum is simply dummy text of the printing and typesetting
industry. Lorem Ipsum has been the industry's standard dummy text
ever since the 1500s, when an unknown printer took a galley of type
and scrambled it to make a type specimen book. It has survived not
only five centuries, but also the leap into electronic typesetting,
remaining essentially unchanged. It was popularised in the 1960s with
the release of Letraset sheets containing Lorem Ipsum passages, and
more recently with desktop publishing software like Aldus PageMaker
including versions of Lorem Ipsum.
Also <a href="matlab:vl_help('fisher')">Fisher vectors</a>.
These are links BL(), BL(A,B) and BLA(A,A) (as long as the dictionary
cites them).
Mimamama
verbatim1
verbatim2
verbatim3
verbatim4
verbatim5
Lorem Ipsum is simply dummy text of the printing and typesetting
industry. Lorem Ipsum has been the industry's standard dummy text
ever since the 1500s, when an unknown printer took a galley of type
and scrambled it to make a type specimen book. It has survived not
only five centuries, but also the leap into electronic typesetting,
remaining essentially unchanged. It was popularised in the 1960s with
the release of Letraset sheets containing Lorem Ipsum passages, and
more recently with desktop publishing software like Aldus PageMaker
including versions of Lorem Ipsum.
- outer1 /
outer1 line 2 /
outer1 line 3 /
    outer1 new paragraph
- inner1
- inner2
- inner3
continued on next line
continued with verbatim
more verbatim after blank
- inner4
- outer again
- outer
bla
- list2
- list4
- BL()
- BL(A,B)
    Test description::
Lorem Ipsum is simply dummy text of the printing
and typesetting industry. Lorem Ipsum has been the industry's
standard dummy text ever since the 1500s, when an unknown printer
took a galley of type and scrambled it to make a type specimen
book. It has survived not only five centuries, but also the leap
into electronic typesetting, remaining essentially unchanged. It
was popularised in the 1960s with the release of Letraset sheets
containing Lorem Ipsum passages, and more recently with desktop
publishing software like Aldus PageMaker including versions of
Lorem Ipsum.
Ancora::
Bli bli bli
Blu blu blu
- list
- lust
- last
Bli bla
Verbatimmo
"""
lines = text.splitlines()
formatter = Formatter(lines, {'BL':'http://www.google.com'}, 'a')
print formatter.toDOM().toxml("UTF-8")
|
ducha-aiki/mods
|
vlfeat/docsrc/formatter.py
|
Python
|
gpl-2.0
| 18,970
|
[
"Brian"
] |
9a7390f46d674f5bbba24edfbfbc1ad4adec506ddb3aff4744311ed10c1b4bd9
|
# Generated from ShExDoc.g4 by ANTLR 4.5.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .ShExDocParser import ShExDocParser
else:
from ShExDocParser import ShExDocParser
# This class defines a complete generic visitor for a parse tree produced by ShExDocParser.
class ShExDocVisitor(ParseTreeVisitor):
# Visit a parse tree produced by ShExDocParser#shExDoc.
def visitShExDoc(self, ctx:ShExDocParser.ShExDocContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#statement.
def visitStatement(self, ctx:ShExDocParser.StatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#notStartAction.
def visitNotStartAction(self, ctx:ShExDocParser.NotStartActionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#directive.
def visitDirective(self, ctx:ShExDocParser.DirectiveContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#valueClassDefinition.
def visitValueClassDefinition(self, ctx:ShExDocParser.ValueClassDefinitionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#valueClassExpr.
def visitValueClassExpr(self, ctx:ShExDocParser.ValueClassExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#valueClassLabel.
def visitValueClassLabel(self, ctx:ShExDocParser.ValueClassLabelContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#baseDecl.
def visitBaseDecl(self, ctx:ShExDocParser.BaseDeclContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#prefixDecl.
def visitPrefixDecl(self, ctx:ShExDocParser.PrefixDeclContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#start.
def visitStart(self, ctx:ShExDocParser.StartContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#shape.
def visitShape(self, ctx:ShExDocParser.ShapeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#shapeDefinition.
def visitShapeDefinition(self, ctx:ShExDocParser.ShapeDefinitionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#includeSet.
def visitIncludeSet(self, ctx:ShExDocParser.IncludeSetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#inclPropertySet.
def visitInclPropertySet(self, ctx:ShExDocParser.InclPropertySetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#someOfShape.
def visitSomeOfShape(self, ctx:ShExDocParser.SomeOfShapeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#multiElementSomeOf.
def visitMultiElementSomeOf(self, ctx:ShExDocParser.MultiElementSomeOfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#innerShape.
def visitInnerShape(self, ctx:ShExDocParser.InnerShapeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#groupShape.
def visitGroupShape(self, ctx:ShExDocParser.GroupShapeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#singleElementGroup.
def visitSingleElementGroup(self, ctx:ShExDocParser.SingleElementGroupContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#multiElementGroup.
def visitMultiElementGroup(self, ctx:ShExDocParser.MultiElementGroupContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#unaryShape.
def visitUnaryShape(self, ctx:ShExDocParser.UnaryShapeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#encapsulatedShape.
def visitEncapsulatedShape(self, ctx:ShExDocParser.EncapsulatedShapeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#include.
def visitInclude(self, ctx:ShExDocParser.IncludeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#shapeLabel.
def visitShapeLabel(self, ctx:ShExDocParser.ShapeLabelContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#tripleConstraint.
def visitTripleConstraint(self, ctx:ShExDocParser.TripleConstraintContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#senseFlags.
def visitSenseFlags(self, ctx:ShExDocParser.SenseFlagsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#predicate.
def visitPredicate(self, ctx:ShExDocParser.PredicateContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#valueClassOrRef.
def visitValueClassOrRef(self, ctx:ShExDocParser.ValueClassOrRefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#valueClassLiteral.
def visitValueClassLiteral(self, ctx:ShExDocParser.ValueClassLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#valueClassNonLiteral.
def visitValueClassNonLiteral(self, ctx:ShExDocParser.ValueClassNonLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#valueClassDatatype.
def visitValueClassDatatype(self, ctx:ShExDocParser.ValueClassDatatypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#valueClassGroup.
def visitValueClassGroup(self, ctx:ShExDocParser.ValueClassGroupContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#valueClassValueSet.
def visitValueClassValueSet(self, ctx:ShExDocParser.ValueClassValueSetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#valueClassAny.
def visitValueClassAny(self, ctx:ShExDocParser.ValueClassAnyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#groupShapeConstr.
def visitGroupShapeConstr(self, ctx:ShExDocParser.GroupShapeConstrContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#shapeOrRef.
def visitShapeOrRef(self, ctx:ShExDocParser.ShapeOrRefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#xsFacet.
def visitXsFacet(self, ctx:ShExDocParser.XsFacetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#stringFacet.
def visitStringFacet(self, ctx:ShExDocParser.StringFacetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#stringLength.
def visitStringLength(self, ctx:ShExDocParser.StringLengthContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#numericFacet.
def visitNumericFacet(self, ctx:ShExDocParser.NumericFacetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#numericRange.
def visitNumericRange(self, ctx:ShExDocParser.NumericRangeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#numericLength.
def visitNumericLength(self, ctx:ShExDocParser.NumericLengthContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#datatype.
def visitDatatype(self, ctx:ShExDocParser.DatatypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#annotation.
def visitAnnotation(self, ctx:ShExDocParser.AnnotationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#cardinality.
def visitCardinality(self, ctx:ShExDocParser.CardinalityContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#repeatRange.
def visitRepeatRange(self, ctx:ShExDocParser.RepeatRangeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#min_range.
def visitMin_range(self, ctx:ShExDocParser.Min_rangeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#max_range.
def visitMax_range(self, ctx:ShExDocParser.Max_rangeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#valueSet.
def visitValueSet(self, ctx:ShExDocParser.ValueSetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#value.
def visitValue(self, ctx:ShExDocParser.ValueContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#iriRange.
def visitIriRange(self, ctx:ShExDocParser.IriRangeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#exclusion.
def visitExclusion(self, ctx:ShExDocParser.ExclusionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#literal.
def visitLiteral(self, ctx:ShExDocParser.LiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#numericLiteral.
def visitNumericLiteral(self, ctx:ShExDocParser.NumericLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#rdfLiteral.
def visitRdfLiteral(self, ctx:ShExDocParser.RdfLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#booleanLiteral.
def visitBooleanLiteral(self, ctx:ShExDocParser.BooleanLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#string.
def visitString(self, ctx:ShExDocParser.StringContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#iri.
def visitIri(self, ctx:ShExDocParser.IriContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#prefixedName.
def visitPrefixedName(self, ctx:ShExDocParser.PrefixedNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#blankNode.
def visitBlankNode(self, ctx:ShExDocParser.BlankNodeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#codeDecl.
def visitCodeDecl(self, ctx:ShExDocParser.CodeDeclContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#productionName.
def visitProductionName(self, ctx:ShExDocParser.ProductionNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#startActions.
def visitStartActions(self, ctx:ShExDocParser.StartActionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#semanticActions.
def visitSemanticActions(self, ctx:ShExDocParser.SemanticActionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ShExDocParser#rdfType.
def visitRdfType(self, ctx:ShExDocParser.RdfTypeContext):
return self.visitChildren(ctx)
del ShExDocParser
|
hsolbrig/shexypy
|
shexypy/shexyparser/parser/ShExDocVisitor.py
|
Python
|
mit
| 11,828
|
[
"VisIt"
] |
cc384b34d749388a2ddc424f34cb1abd06356fddfcfaebef0a397a2007c0c786
|
"""Plot the result of sampling clusters
{{header}}
"""
# ? include "plot_header.template"
# ? from "plot_macros.template" import xdg_open with context
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from msmbuilder.io import load_trajs, load_generic
sns.set_style('ticks')
colors = sns.color_palette()
## Load
meta, ttrajs = load_trajs('ttrajs')
txx = np.concatenate(list(ttrajs.values()))
kmeans = load_generic('kmeans.pickl')
inds = load_generic("cluster-sample-inds.pickl")
coordinates = [
np.asarray([ttrajs[traj_i][frame_i, :] for traj_i, frame_i in state_inds])
for state_inds in inds
]
## Overlay sampled states on histogram
def plot_sampled_states(ax):
ax.hexbin(txx[:, 0], txx[:, 1],
cmap='magma_r',
mincnt=1,
bins='log',
alpha=0.8,
)
# Show sampled points as scatter
# Annotate cluster index
for i, coo in enumerate(coordinates):
        ax.scatter(coo[:, 0], coo[:, 1], c=colors[i % 6], s=40)
ax.text(kmeans.cluster_centers_[i, 0],
kmeans.cluster_centers_[i, 1],
"{}".format(i),
ha='center',
va='center',
size=16,
bbox=dict(
boxstyle='round',
fc='w',
ec="0.5",
alpha=0.9,
),
zorder=10,
)
ax.set_xlabel("tIC 1", fontsize=16)
ax.set_ylabel("tIC 2", fontsize=16)
## Render a script for loading in vmd
def load_in_vmd(dirname='cluster_samples'):
k = len(inds[0])
templ = [
'# autogenerated by msmbuilder',
'# open with `vmd -e load-cluster-samples.tcl`',
'',
'# Defaults',
'mol default material Transparent',
'mol default representation NewCartoon',
'',
]
for i in range(len(inds)):
templ += [
'# State {}'.format(i),
'mol new top.pdb',
'mol addfile {}/{}.xtc waitfor all'.format(dirname, i),
'animate delete beg 0 end 0 top',
'mol rename top State-{}'.format(i),
'mol modcolor 0 top ColorID {}'.format(i),
'mol drawframes top 0 0:{k}'.format(k=k),
'',
]
return '\n'.join(templ)
## Plot
fig, ax = plt.subplots(figsize=(7, 5))
plot_sampled_states(ax)
fig.tight_layout()
fig.savefig('cluster-samples.pdf')
# {{xdg_open('cluster-samples.pdf')}}
## Render vmd
with open('load-cluster-samples.tcl', 'w') as f:
f.write(load_in_vmd())
|
mpharrigan/mixtape
|
msmbuilder/project_templates/cluster/sample-clusters-plot.py
|
Python
|
lgpl-2.1
| 2,612
|
[
"VMD"
] |
a9a44f99c4701721d88c874db16e6244cccd8bbe2909f5d5a1f86eae21a13d7a
|
import itertools
import os
from collections import defaultdict
# Mapping moved to collections.abc in Python 3.3 and was removed from
# collections in 3.10.
from collections.abc import Mapping
from concurrent.futures import ProcessPoolExecutor
import pandas as pd
import pysam
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt # noqa: E402
from matplotlib.backends.backend_pdf import PdfPages # noqa: E402
# from https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
def dict_merge(dct, merge_dct):
"""
Merge dicts recursively.
Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
for k in merge_dct:
if (k in dct and isinstance(dct[k], dict) and isinstance(merge_dct[k], Mapping)):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
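# Minimal usage sketch (added; not part of the original module):
#   a = {'x': {'y': 1}}
#   dict_merge(a, {'x': {'z': 2}})
#   assert a == {'x': {'y': 1, 'z': 2}}  # mutated in place, returns None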
def dd():
"""Return defaultdict with dict factory."""
return defaultdict(dict)
def get_coverage(file, label, regions=None, nth=1, readcount=-1):
"""Get coverage for every `nth` position from alignment file."""
readcount = float(readcount)
contigs_coverage = defaultdict(dd)
with pysam.AlignmentFile(file) as f:
if isinstance(regions, str):
regions = [regions]
for region in regions:
chrom = region
if ':' in region and '-' in region:
chrom = region.rsplit(':', 1)[0]
start_stop = region.rsplit(':', 1)[1]
start, stop = start_stop.split('-')
start = int(start.strip())
stop = int(stop.strip())
else:
start = 0
stop = f.header.get_reference_length(chrom)
contigs_coverage[chrom][label][start] = 0
contigs_coverage[chrom][label][stop] = 0
for pileup_pos in f.pileup(contig=chrom, start=start, stop=stop, max_depth=20000):
pos = pileup_pos.pos
if pileup_pos.pos % nth == 0:
before = pos - nth
after = pos + nth
if before not in contigs_coverage[chrom][label]:
contigs_coverage[chrom][label][before] = 0
contigs_coverage[chrom][label][after] = 0
contigs_coverage[chrom][label][pos] = pileup_pos.nsegments / (readcount / 10**6) if readcount else 0
return contigs_coverage
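# Return shape (added note, illustrative): a nested mapping
#   contigs_coverage[chrom][label][pos] -> depth
# normalised to reads per million when a positive `readcount` is supplied
# (label values such as 'sample_A' are hypothetical).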
def plot_coverage(contigs_coverage, style='ggplot', plot_kind='area', nrows=8):
"""Plot coverage in contigs_coverage."""
plt.style.use(style)
figs = []
i = 0
for title, data in contigs_coverage.items():
if i % nrows == 0:
fig_axes = plt.subplots(nrows=nrows, figsize=(10, 20))
fig = fig_axes[0]
axes = fig_axes[1:]
plt.subplots_adjust(hspace=0.3)
figs.append(fig)
i += 1
nrow = i - (int(i / nrows) * nrows)
df = pd.DataFrame.from_dict(data).reset_index()
df = df.sort_values('index').fillna(0)
ax = df.plot(
x='index',
kind=plot_kind,
title=title,
stacked=False,
alpha=0.5,
fig=fig,
ax=axes[0][nrow]
)
ax.set_xlabel("nt")
ax.legend(bbox_to_anchor=(1.1, 1), loc="upper right")
plt.tight_layout()
return figs
def mp_get_coverage(args):
"""Wrap get_coverage for multiprocessing Pool implementation."""
return get_coverage(*args)
def get_total_coverage(file, cores=1):
"""Get number of reads in file."""
return int(pysam.view('-c', "-@%d" % cores, file).strip())
def plot_coverage_in_regions(files, labels, output_path, regions=None, cores=1, total_reads=None, style='ggplot', plot_kind='area'):
"""
    Plot coverage for `files`, where files are multiple BAM files.
    `output_path` is the path at which the plot should be saved.
    `regions` can be specified; these should be chromosome names for now.
"""
if not regions:
regions = pysam.AlignmentFile(files[0]).references
for f in files:
if not os.path.exists("%s.bai" % f):
pysam.index(f)
if not total_reads or sum(total_reads) == 0:
total_reads = [get_total_coverage(file, cores=cores) for file in files]
starmap_args = [(file, label, region, 1, reads) for (file, label, reads), region in itertools.product(zip(files, labels, total_reads), regions)]
if cores == 1:
r = itertools.starmap(get_coverage, starmap_args)
else:
pool = ProcessPoolExecutor(max_workers=cores)
r = pool.map(mp_get_coverage, starmap_args)
pool.shutdown()
contigs_coverage = next(r)
for d in r:
dict_merge(contigs_coverage, d)
figs = plot_coverage(contigs_coverage, style=style, plot_kind=plot_kind)
if output_path:
with PdfPages(output_path) as pdf:
for f in figs:
pdf.savefig(f)
return figs
|
bardin-lab/readtagger
|
readtagger/plot_coverage.py
|
Python
|
mit
| 5,120
|
[
"pysam"
] |
f282ab5f0cb25cc673217fa625fccadce00d22f1bb372daa91ce23221d8c8065
|
#!/usr/bin/env python
"""
UP_HRV2OISST_model_prep.py
DataSource: ftp://ftp.cdc.noaa.gov/Datasets/noaa.oisst.v2.highres/
NOAA High Resolution SST data provided by the NOAA/OAR/ESRL PSD,
Boulder, Colorado, USA, from their Web site at http://www.esrl.noaa.gov/psd/
"""
#System Stack
import datetime
import sys
#Science Stack
import numpy as np
from netCDF4 import Dataset
from netCDF4 import date2num
# User Stack
import utilities.haversine as sphered
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR','Unimak', 'Shumagin','3hr filtered', 'U,V','Winds', 'Gulf of Alaska'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
ncdata = ncutil.ncreadfile_dic(nchandle, params)
ncutil.ncclose(nchandle)
return (ncdata, params)
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
print "Parameters available: "
print params
ncdata = ncutil.ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
ncutil.ncclose(nchandle)
return ncdata
def latlon_grid(infile):
nchandle = ncutil.ncopen(infile)
lat_lon = ncutil.get_geocoords(nchandle)
ncutil.ncclose(nchandle)
return (lat_lon)
def write2epic( file_name, stationid, time, lat_lon, data ):
ncinstance = ncutil.EPIC_NC_SST(savefile=file_name)
ncinstance.file_create()
ncinstance.sbeglobal_atts()
ncinstance.PMELglobal_atts(Station_Name=stationid, file_name=( __file__.split('/')[-1]) )
ncinstance.dimension_init(len_time=len(time[0]))
ncinstance.variable_init()
ncinstance.add_coord_data(time1=time[0], time2=time[1], latitude=lat_lon[0], longitude=-1 * lat_lon[1], \
depth_level=0. )
ncinstance.add_data('T_25', data[0])
ncinstance.close()
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
""" Ingest EPIC date or NCEP Date and provide python serial date"""
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
elif file_flag == 'NARR':
""" days since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time + base_date
elif file_flag == 'NCEP':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
def pydate2EPIC(file_time):
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
time1 = np.floor(file_time) + offset #truncate to get day and add 2440000 for true julian day
time2 = ( file_time - np.floor(file_time) ) * (1000. * 60. * 60.* 24.) #milliseconds since 0000GMT
return(time1, time2)
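# Worked round trip (added, illustrative): EPIC true Julian day 2440000 at
# zero milliseconds is the reference date 1968-05-23, and converting back
# recovers the same pair:
#   t = date2pydate(2440000, 0)   # ordinal of datetime(1968, 5, 23)
#   pydate2EPIC(float(t))         # -> (2440000.0, 0.0)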
def pythondate2str(pdate):
(year,month,day) = datetime.datetime.fromordinal(int(pdate)).strftime('%Y-%b-%d').split('-')
delta_t = pdate - int(pdate)
dhour = str(int(np.floor(24 * (delta_t))))
dmin = str(int(np.floor(60 * ((24 * (delta_t)) - np.floor(24 * (delta_t))))))
dsec = str(int(np.floor(60 * ((60 * ((24 * (delta_t)) - np.floor(24 * (delta_t)))) - \
np.floor(60 * ((24 * (delta_t)) - np.floor(24 * (delta_t))))))))
#add zeros to time
if len(dhour) == 1:
dhour = '0' + dhour
if len(dmin) == 1:
dmin = '0' + dmin
if len(dsec) == 1:
dsec = '0' + dsec
return year + '-' + month + '-' + day + ' ' + dhour+':'+dmin+':'+dsec
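# Example (added, illustrative): a fractional day of .5 renders as
# 12:00:00, since floor(24 * .5) = 12 with zero remaining minutes/seconds.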
"""------------------------- Topo Modules -------------------------------------------"""
def etopo5_data():
""" read in etopo5 topography/bathymetry. """
file = '../data/etopo5.nc'
etopodata = Dataset(file)
topoin = etopodata.variables['bath'][:]
lons = etopodata.variables['X'][:]
lats = etopodata.variables['Y'][:]
etopodata.close()
topoin,lons = shiftgrid(0.,topoin,lons,start=False) # -360 -> 0
lons, lats = np.meshgrid(lons, lats)
return(topoin, lats, lons)
"""------------------------- Main Modules -------------------------------------------"""
### list of files
NARR = '/Users/bell/in_and_outbox/data_sets/reanalyis_data/OISSTV2/'
infile = [NARR + 'sst.day.anom.2019.v2.nc']
### Grab grid points for future slicing - assume grid is same in all model output
lat_lon = latlon_grid(infile[0])
#stn ['1','2']
station_name = ['ShumaginDown']
sta_lat = [54.5]
sta_long = [161]
#Find NCEP nearest point to moorings - haversine formula
# NCEP data is 0->360 (positive east), Moorings are usually expressed +W for FOCI
stn1_pt = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '1d')
stn1_modelpt = [lat_lon['lat'][stn1_pt[3]],lat_lon['lon'][stn1_pt[4]]]
print "stn1 nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[0], sta_long[0], stn1_modelpt[0], stn1_modelpt[1])
stn1_modelpt[1] = -1.*((180 - stn1_modelpt[1]) + 180)
print "thus converting lon to degrees W positive {0}".format(stn1_modelpt[1])
#loop over all requested data
years = range(2016,2019)
for yy in years:
# retrieve only these location's data
    # sst anomaly
infile = NARR + 'sst.day.anom.'+ str(yy) + '.v2.nc'
print "Working on file " + infile
stn1_data = from_netcdf_1dsplice(infile, None, stn1_pt[3], stn1_pt[4])
stn1_sst = stn1_data['anom']
#convert to EPIC time
pydate = date2pydate(stn1_data['time'], file_flag='NARR')
epic_time, epic_time1 = pydate2EPIC(pydate)
    # output sst anomaly from the nearest model grid point
save_to_nc = True
if save_to_nc:
# write to NetCDF
outfile = 'data/NOAA_OI_SST_V2_anom_stn1_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
write2epic( outfile, station_name[0], [epic_time, epic_time1], stn1_modelpt, [stn1_sst,])
plot_geoloc = True
if plot_geoloc:
(topoin, elats, elons) = etopo5_data()
fig = plt.figure()
ax = plt.subplot(111)
m = Basemap(resolution='i',projection='merc', llcrnrlat=52, \
urcrnrlat=58,llcrnrlon=-165,urcrnrlon=-155, lat_ts=45)
# Mooring Data
x_moor, y_moor = m(-1. * sta_long[0],sta_lat[0])
x_close, y_close = m(stn1_modelpt[1], stn1_modelpt[0])
#ETOPO 5 contour data
ex, ey = m(elons, elats)
CS = m.contourf(ex,ey,topoin, levels=range(250,5000,250), cmap='gray_r', alpha=.75) #colors='black'
CS = m.contour(ex,ey,topoin, levels=range(250,5000,250), linewidths=0.2, colors='black', alpha=.75) #
    CS = m.contour(ex,ey,topoin, levels=[-1000, -200, -100], linestyles='--', linewidths=0.2, colors='black', alpha=.75) #
#plot points
m.scatter(x_close,y_close,20,marker='+',color='b')
m.scatter(x_moor,y_moor,20,marker='o',color='g')
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(50,62,2.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(-165,-145,2.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
#m.fillcontinents(color='black')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]) )
plt.savefig('images/shumigans_region.png', bbox_inches='tight', dpi = (100))
plt.close()
|
shaunwbell/FOCI_Analysis
|
ReanalysisRetreival_orig/UnimakPass/UP_HRV2OISST_model_prep.py
|
Python
|
mit
| 8,998
|
[
"NetCDF"
] |
bdd68003ad4b5eda3c99bda0435a0f741cae3593f454162dbe0393dd906db5f3
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
import logging
logger = logging.getLogger('camelot.view.export.html')
def open_html_in_desktop_service(html):
import os
import tempfile
html_fd, html_fn = tempfile.mkstemp(suffix='.html')
html_file = os.fdopen(html_fd, 'wb')
html_file.write(html.encode('utf-8'))
html_file.close()
from PyQt4 import QtGui, QtCore
QtGui.QDesktopServices.openUrl(QtCore.QUrl('file://%s' % html_fn))
|
kurtraschke/camelot
|
camelot/view/export/desktop_service.py
|
Python
|
gpl-2.0
| 1,481
|
[
"VisIt"
] |
eebd211e0411f01bb2def5954940e8db81d52f8c846d16921e5b1f8844573713
|
from __future__ import print_function
__author__ = """Alex "O." Holcombe, Charles Ludowici, """ ## double-quotes will be silently removed, single quotes will be left, eg, O'Connor
import time, sys, platform, os
from math import atan, atan2, pi, cos, sin, sqrt, ceil, radians, degrees
import numpy as np
import psychopy, psychopy.info
import copy
from psychopy import visual, sound, monitors, logging, gui, event, core, data
try:
from helpersAOH import accelerateComputer, openMyStimWindow
except Exception as e:
    print(e); print('Problem loading helpersAOH. Check that the file helpersAOH.py is in the same directory as this file')
print('Current directory is ',os.getcwd())
eyeTracking = False
if eyeTracking:
try:
import eyelinkEyetrackerForPsychopySUPA3
except Exception as e:
print(e)
        print('Problem loading eyelinkEyetrackerForPsychopySUPA3. Check that the file eyelinkEyetrackerForPsychopySUPA3.py is in the same directory as this file')
print('While a different version of pylink might make your eyetracking code work, your code appears to generally be out of date. Rewrite your eyetracker code based on the SR website examples')
#Psychopy v1.83.01 broke this, pylink version prevents EyelinkEyetrackerForPsychopySUPA3 stuff from importing. But what really needs to be done is to change eyetracking code to more modern calls, as indicated on SR site
eyeTracking = False
expname= "dot-jump"
demo = False; exportImages = False
autopilot = False
subject='test'
###############################
### Setup the screen parameters ##############################################################################################
##
allowGUI = False
units='deg' #'cm'
fullscrn=False
waitBlank=False
if True: #just so I can indent all the below
refreshRate= 85 *1.0; #160 #set to the framerate of the monitor
fullscrn=True; #show in small window (0) or full screen (1)
scrn=True #which screen to display the stimuli. 0 is home screen, 1 is second screen
# create a dialog from dictionary
infoFirst = { 'Autopilot':autopilot, 'Check refresh etc':True, 'Use second screen':scrn, 'Fullscreen (timing errors if not)': fullscrn, 'Screen refresh rate': refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='MOT',
order=['Autopilot','Check refresh etc', 'Use second screen', 'Screen refresh rate', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating',
'Use second Screen': ''},
)
if not OK.OK:
print('User cancelled from dialog box'); logging.info('User cancelled from dialog box'); core.quit()
autopilot = infoFirst['Autopilot']
checkRefreshEtc = infoFirst['Check refresh etc']
scrn = infoFirst['Use second screen']
print('scrn = ',scrn, ' from dialog box')
fullscrn = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
#monitor parameters
widthPix = 1280 #1440 #monitor width in pixels
heightPix =1024 #900 #monitor height in pixels
monitorwidth = 40.5 #28.5 #monitor width in centimeters
viewdist = 55.; #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
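    # Worked numbers (added, illustrative): widthPix = 1280, monitorwidth =
    # 40.5 cm and viewdist = 55 cm give atan(40.5/55) ~ 36.4 deg across the
    # screen, i.e. roughly 35 pixels per degree.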
bgColor = [-1,-1,-1] #black background
monitorname = 'testMonitor' # 'mitsubishi' #in psychopy Monitors Center
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#fetch the most recent calib for this monitor
mon.setSizePix( (widthPix,heightPix) )
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscrn,scrn,waitBlank)
myWin.setRecordFrameIntervals(False)
trialsPerCondition = 2 #default value
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: #checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
print('Finished runInfo- which assesses the refresh and processes of this computer')
refreshMsg1 = 'Median frames per second ='+ str( np.round(1000./runInfo["windowRefreshTimeMedian_ms"],1) )
refreshRateTolerancePct = 3
pctOff = abs( (1000./runInfo["windowRefreshTimeMedian_ms"]-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
##
### END Setup of the screen parameters ##############################################################################################
####################################
askUserAndConfirmExpParams = True
if autopilot:
subject = 'autoTest'
###############################
### Ask user exp params ##############################################################################################
## askUserAndConfirmExpParams
if askUserAndConfirmExpParams:
dlgLabelsOrdered = list() #new dialog box
myDlg = gui.Dlg(title=expname, pos=(200,400))
if not autopilot:
myDlg.addField('Subject code :', subject)
dlgLabelsOrdered.append('subject')
else:
myDlg.addField('Subject code :', subject)
dlgLabelsOrdered.append('subject')
myDlg.addField('autoPilotTime:', 0, tip='Auto response time relative to cue')
myDlg.addField('randomTime:',False, tip = 'Add (rounded) gaussian N(0,2) error to time offset?')
myDlg.addField('autoPilotSpace:',0, tip='Auto response position relative to cue')
myDlg.addField('randomSpace:',False, tip = 'Add (rounded) gaussian N(0,2) error to space offset?')
dlgLabelsOrdered.append('autoPilotTime')
dlgLabelsOrdered.append('randomTime')
dlgLabelsOrdered.append('autoPilotSpace')
dlgLabelsOrdered.append('randomSpace')
myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
dlgLabelsOrdered.append('trialsPerCondition')
pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
msgWrongResolution = ''
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Instead of desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels, screen apparently '+ str(myWinRes[0])+ 'x'+ str(myWinRes[1])
myDlg.addText(msgWrongResolution, color='Red')
print(msgWrongResolution); logging.info(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at response time', color='DimGrey') #works in PsychoPy1.84
#myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) #color names not working for some pre-1.84 versions
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
thisInfo = myDlg.data #this will be a list of data returned from each field added in order
if autopilot:
name=thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: #if entered something
subject = name #change subject default name to what user entered
trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
autoSpace = thisInfo[dlgLabelsOrdered.index('autoPilotSpace')]
autoTime = thisInfo[dlgLabelsOrdered.index('autoPilotTime')]
randomTime = thisInfo[dlgLabelsOrdered.index('randomTime')]
randomSpace = thisInfo[dlgLabelsOrdered.index('randomSpace')]
print('trialsPerCondition=',trialsPerCondition)
logging.info('trialsPerCondition ='+str(trialsPerCondition))
else:
print('User cancelled from dialog box.'); logging.info('User cancelled from dialog box')
logging.flush()
core.quit()
### Ask user exp params
## END askUserAndConfirmExpParams ###############################
##############################################################################################
if os.path.isdir('.'+os.sep+'dataRaw'):
dataDir='dataRaw'
else:
msg= 'dataRaw directory does not exist, so saving data in present working directory'
print(msg); logging.info(msg)
dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
fileNameWithPath = dataDir+os.sep+subject+ '_' + expname+timeAndDateStr
if not demo and not exportImages:
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileNameWithPath + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logF = logging.LogFile(fileNameWithPath+'.log',
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#info, data, warnings, and errors will be sent to this logfile
if demo or exportImages:
logging.console.setLevel(logging.ERROR) #only show this level's and higher messages
logging.console.setLevel(logging.WARNING) #DEBUG means set the console to receive nearly all messges, INFO is for everything else, INFO, EXP, DATA, WARNING and ERROR
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
longerThanRefreshTolerance = 0.27
longFrameLimit = round(1000./refreshRate*(1.0+longerThanRefreshTolerance),3) # round(1000/refreshRate*1.5,2)
msg = 'longFrameLimit='+ str(longFrameLimit) +' Recording trials where one or more interframe interval exceeded this figure '
logging.info(msg); print(msg)
if msgWrongResolution != '':
logging.error(msgWrongResolution)
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscrn,scrn,waitBlank)
runInfo = psychopy.info.RunTimeInfo(
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
msg = 'second window opening runInfo mean ms='+ str( runInfo["windowRefreshTimeAvg_ms"] )
logging.info(msg); print(msg)
logging.info(runInfo)
logging.info('gammaGrid='+str(mon.getGammaGrid()))
logging.info('linearizeMethod='+str(mon.getLinearizeMethod()))
####Functions. Save time by automating processes like stimulus creation and ordering
############################################################################
def oneFrameOfStim(n, itemFrames, SOAFrames, cueFrames, cuePos, trialObjects):
cueFrame = cuePos * SOAFrames
cueMax = cueFrame + cueFrames
showIdx = int(np.floor(n/SOAFrames))
#objectIdxs = [i for i in range(len(trialObjects))]
#objectIdxs.append(len(trialObjects)-1) #AWFUL hack
#print(objectIdxs[showIdx])
#floored quotient
obj = trialObjects[showIdx]
drawObject = n%SOAFrames < itemFrames
if drawObject:
myWin.color = bgColor
if n >= cueFrame and n < cueMax:
#print('cueFrames! n is', n,'. cueFrame is ,', cueFrame, 'cueFrame + cueFrames is ', (cueFrame + cueFrames))
#if n%2 == 0: #This should make it flash, but it might be too fast
#print('cue flash')
#myWin.color = (0,0,0)
obj.draw()
cue.draw()
else:
obj.draw()
return True
#objects: Stimuli to display or
#cue: cue stimulus or stimuli
#timing parameters: Could be item duration, soa and isi. i.e. if SOA+Duration % n == 0: stimulus.setColor(stimulusColor)
#bgColor and stimulusColor: if displaying and hiding stimuli, i.e. for RSVP
#movementVector: direction and distance of movement if moving stimuli
def oneTrial(stimuli):
dotOrder = np.arange(len(stimuli))
np.random.shuffle(dotOrder)
print(dotOrder)
shuffledStimuli = [stimuli[i] for i in dotOrder]
ts = []
myWin.flip(); myWin.flip() #Make sure raster at top of screen (unless not in blocking mode), and give CPU a chance to finish other tasks
t0 = trialClock.getTime()
for n in range(trialFrames):
fixation.draw()
#print(n//SOAFrames)
oneFrameOfStim(n, itemFrames, SOAFrames, cueFrames, cuePos, shuffledStimuli)
myWin.flip()
ts.append(trialClock.getTime() - t0)
return True, shuffledStimuli, dotOrder, ts
def getResponse(trialStimuli):
if autopilot:
spacing = 360./nDots
autoResponseIdx = cuePos + autoTime #The serial position of the response in the stream
if randomTime:
autoResponseIdx += int(round( np.random.normal(0,2) ))
itemAtTemporalSelection = trialStimuli[autoResponseIdx]
unshuffledPositions = [dot.pos.tolist() for dot in stimuli]
itemSpatial = unshuffledPositions.index(itemAtTemporalSelection.pos.tolist())
itemSpatial = itemSpatial + autoSpace
if randomSpace:
itemSpatial += int(round( np.random.normal(0,2) ))
while itemSpatial>23:
itemSpatial = itemSpatial - 23
#Once we have temporal pos of selected item relative to start of the trial
#Need to get the serial spatial pos of this item, so that we can select items around it based on the autoSpace offset
#print('itemSpatial is: ', itemSpatial)
selectionTemporal = trialStimuli.index(stimuli[itemSpatial]) #This seems redundant, but it tests that the item we've selected in space is the cued item in time. if the temporal and spatial offsets are 0, it should be the same as cuePos.
accuracy = cuePos == selectionTemporal
mousePos = (stimuli[itemSpatial].pos[0],stimuli[itemSpatial].pos[1])
expStop = False
item = stimuli[itemSpatial]
return accuracy, item, expStop, mousePos
elif not autopilot:
myMouse = event.Mouse(visible = False,win=myWin)
responded = False
expStop = False
event.clearEvents()
mousePos = (1e6,1e6)
escape = event.getKeys()
myMouse.setPos((0,0))
myMouse.setVisible(True)
while not responded:
for item in trialStimuli:
item.draw()
myWin.flip()
button = myMouse.getPressed()
mousePos = myMouse.getPos()
escapeKey = event.getKeys()
if button[0]:
print('click detected')
responded = True
print('getResponse mousePos:',mousePos)
elif len(escapeKey)>0:
if escapeKey[0] == 'space' or escapeKey[0] == 'ESCAPE':
expStop = True
responded = True
return False, np.random.choice(trialStimuli), expStop, (0,0)
clickDistances = []
for item in trialStimuli:
x = mousePos[0] - item.pos[0]
y = mousePos[1] - item.pos[1]
distance = sqrt(x**2 + y**2)
clickDistances.append(distance)
if not expStop:
minDistanceIdx = clickDistances.index(min(clickDistances))
accuracy = minDistanceIdx == cuePos
item = trialStimuli[minDistanceIdx]
myMouse.setVisible(False)
return accuracy, item, expStop, mousePos
def drawStimuli(nDots, radius, center, stimulusObject, sameEachTime = True):
if len(center) > 2 or len(center) < 2:
print('Center coords must be list of length 2')
return None
if not sameEachTime and not isinstance(stimulusObject, (list, tuple)):
        print('You want different objects in each position, but your stimulus object is not a list or tuple')
return None
if not sameEachTime and isinstance(stimulusObject, (list, tuple)) and len(stimulusObject)!=nDots:
print('You want different objects in each position, but the number of positions does not equal the number of items')
return None
spacing = 360./nDots
stimuli = []
for dot in range(nDots): #have to specify positions for multiples of 90deg because python (computers in general?) can't store exact value of pi and thus cos(pi/2) = 6.123e-17, not 0
angle = dot*spacing
if angle == 0:
xpos = radius
ypos = 0
elif angle == 90:
xpos = 0
ypos = radius
elif angle == 180:
xpos = -radius
ypos = 0
elif angle == 270:
xpos = 0
ypos = -radius
elif angle%90!=0:
xpos = radius*cos(radians(angle))
ypos = radius*sin(radians(angle))
if sameEachTime:
stim = copy.copy(stimulusObject)
elif not sameEachTime:
stim = stimulusObject[dot]
stim.pos = (xpos,ypos)
stimuli.append(stim)
return stimuli
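# Geometry check (added, illustrative): with nDots = 24 the spacing is 15
# degrees, so dot 0 lands exactly at (radius, 0), dot 6 at (0, radius),
# dot 12 at (-radius, 0) and dot 18 at (0, -radius) via the special-cased
# multiples of 90 degrees above.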
def checkTiming(ts):
interframeIntervals = np.diff(ts) * 1000
#print(interframeIntervals)
frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
idxsInterframeLong = np.where( interframeIntervals > longFrameLimit ) [0] #frames that exceeded 150% of expected duration
numCasesInterframeLong = len( idxsInterframeLong )
if numCasesInterframeLong > 0:
print(numCasesInterframeLong,'frames of', trialFrames,'were longer than',str(1000/refreshRate*(1.0+frameTimeTolerance)))
return numCasesInterframeLong
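# Worked numbers (added, illustrative): at refreshRate = 85 the expected
# frame is 1000/85 ~ 11.76 ms, so with frameTimeTolerance = .3 any
# interframe interval above ~15.29 ms counts as a long frame.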
##Set up stimuli
stimulus = visual.Circle(myWin, radius = .2, fillColor = (1,1,1) )
nDots = 24
radius = 4
center = (0,0)
sameEachTime = True
#(nDots, radius, center, stimulusObject, sameEachTime = True)
stimuli = drawStimuli(nDots, radius, center, stimulus, sameEachTime)
#print(stimuli)
#print('length of stimuli object', len(stimuli))
######Create visual objects, noise masks, response prompts etc. ###########
######Draw your stimuli here if they don't change across trials, but other parameters do (like timing or distance)
######If you want to automate your stimuli. Do it in a function below and save clutter.
######For instance, maybe you want random pairs of letters. Write a function!
###########################################################################
fixSize = .1
fixation= visual.Circle(myWin, radius = fixSize , fillColor = (1,1,1), units=units)
cue = visual.Circle(myWin, radius = radius + 2, fillColor = None, lineColor = (1,1,1), units = units)
###Trial timing parameters
SOAMS = 333.333
itemMS = 111.111
ISIMS = SOAMS - itemMS
trialMS = SOAMS * nDots
cueMS = itemMS
SOAFrames = int(np.floor(SOAMS/(1000./refreshRate)))
itemFrames = int(np.floor(itemMS/(1000./refreshRate)))
ISIFrames = int(np.floor(ISIMS/(1000./refreshRate)))
trialFrames = int(nDots*SOAFrames)
cueFrames = int(np.floor(cueMS/(1000./refreshRate)))
print('cueFrames=',cueFrames)
print('itemFrames=',itemFrames)
print('refreshRate =', refreshRate)
print('cueMS from frames =', cueFrames*(1000./refreshRate))
print('num of SOAs in the trial:', trialFrames/SOAFrames)
##Factorial design
numResponsesPerTrial = 1 #default. Used to create headers for dataFile
stimList = []
#cuePositions = [dot for dot in range(nDots) if dot not in [0,nDots-1]]
cuePositions = [10]
print('cuePositions: ',cuePositions)
#cuePositions = cuePositions[2:(nDots-3)] #drop the first and final two dots
#Set up the factorial design (list of all conditions)
for cuePos in cuePositions:
stimList.append({'cuePos':cuePos})
trials = data.TrialHandler(stimList, nReps = trialsPerCondition)
#print(trials)
####Create output file###
#########################################################################
dataFile = open(fileNameWithPath + '.txt', 'w')
numResponsesPerTrial = 1
#headers for initial datafile rows, they don't get repeated. These appear in the file in the order they appear here.
oneOffHeaders = [
'subject',
'task',
'staircase',
'trialNum'
]
for header in oneOffHeaders:
print(header, '\t', end='', file=dataFile)
#Headers for duplicated datafile rows. These are repeated using numResponsesPerTrial. For instance, we might have two responses in a trial.
duplicatedHeaders = [
'responseSpatialPos',
'responseX',
'responseY',
'correctX',
'correctY',
'clickX',
'clickY',
'accuracy',
'responsePosInStream',
'correctPosInStream'
]
if numResponsesPerTrial == 1:
for header in duplicatedHeaders:
print(header, '\t', end='', file=dataFile)
elif numResponsesPerTrial > 1:
for response in range(numResponsesPerTrial):
for header in duplicatedHeaders:
print(header+str(response), '\t', end='', file=dataFile)
for pos in range(nDots):
print('position'+str(pos),'\t',end='',file=dataFile)
#Headers done. Do a new line
print('longFrames',file=dataFile)
expStop = False
trialNum = 0; numTrialsCorrect = 0; framesSaved = 0
print('Starting experiment of',trials.nTotal,'trials. Current trial is trial ',trialNum)
#NextRemindCountText.setText( str(trialNum) + ' of ' + str(trials.nTotal) )
#NextRemindCountText.draw()
myWin.flip()
#end of header
trialClock = core.Clock()
stimClock = core.Clock()
if eyeTracking:
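    # Added note: getEyeTrackingFileFromEyetrackingMachineAtEndOfExperiment
    # and Tracker_EyeLink are not defined in this file, so this branch only
    # works when the EyeLink helpers are importable; it is unreachable while
    # eyeTracking is False above.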
if getEyeTrackingFileFromEyetrackingMachineAtEndOfExperiment:
eyeMoveFile=('EyeTrack_'+subject+'_'+timeAndDateStr+'.EDF')
tracker=Tracker_EyeLink(myWin,trialClock,subject,1, 'HV5',(255,255,255),(0,0,0),False,(widthPix,heightPix))
while trialNum < trials.nTotal and expStop==False:
fixation.draw()
myWin.flip()
if not autopilot:
core.wait(1)
trial = trials.next()
# print('trial idx is',trials.thisIndex)
cuePos = trial.cuePos
# print(cuePos)
print("Doing trialNum",trialNum)
trialDone, trialStimuli, trialStimuliOrder, ts = oneTrial(stimuli)
#Shift positions so that the list starts at 1, which is positioned at (0,radius), and increases clockwise. This is what the MM code expects
MMPositions = list() #Mixture modelling positions
for dotPos in trialStimuliOrder:
if dotPos < (nDots/4 - 1): #Because python indexes start at 0, 5 is the 6th pos.
MMPositions.append(dotPos + 20)
elif dotPos >= (nDots/4 -1):
MMPositions.append(dotPos -4)
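        # Index check (added, illustrative): with nDots == 24 the branch above
        # maps drawn spatial indices 0..4 to 20..24 and 5..23 to 1..19.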
nBlips = checkTiming(ts)
# print(trialStimuliOrder)
if trialDone:
accuracy, response, expStop, clickPos = getResponse(trialStimuli)
responseCoord = response.pos.tolist()
spatialRelativeToXAxis = [item.pos.tolist() for item in stimuli]
try:
responseSpatialRelativeToXAxis = spatialRelativeToXAxis.index(responseCoord)
except ValueError:
print('coord not in list')
if responseSpatialRelativeToXAxis < (nDots/4-1):
responseSpatial = responseSpatialRelativeToXAxis + 20
elif responseSpatialRelativeToXAxis >= (nDots/4-1):
responseSpatial = responseSpatialRelativeToXAxis - 4
trialPositions = [item.pos.tolist() for item in trialStimuli]
responseTemporal = trialPositions.index(responseCoord)
# print('trial positions in sequence:',trialPositions)
# print('position of item nearest to click:',responseSpatial)
# print('Position in sequence of item nearest to click:',responseTemporal)
correctSpatial = trialStimuli[cuePos].pos
correctTemporal = cuePos
print(subject,'\t',
'dot-jump','\t',
'False','\t',
trialNum,'\t',
responseSpatial,'\t',
responseCoord[0],'\t',
responseCoord[1],'\t',
correctSpatial[0],'\t',
correctSpatial[1],'\t',
clickPos[0],'\t',
clickPos[1],'\t',
accuracy,'\t',
responseTemporal,'\t',
correctTemporal,'\t',
end='',
file = dataFile
)
for dot in range(nDots):
print(MMPositions[dot], '\t',end='', file=dataFile)
print(nBlips, file=dataFile)
trialNum += 1
dataFile.flush()
if expStop:
dataFile.flush()
|
alexholcombe/dot-jump
|
dataRaw/Fixed Cue/test_dot-jump25Oct2016_10-53.py
|
Python
|
gpl-3.0
| 25,090
|
[
"Gaussian"
] |
60e810fcb51ee30c84c1589a2cd0f99766db4739b3c022e08e8295c1b5910f0a
|
# Contour Copyright (C) 2013-2014 Hiroyuki Sakai
#
# This file is part of Contour.
#
# Contour is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Contour is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Contour. If not, see <http://www.gnu.org/licenses/>.
"""Describes the models used in Contour.
.. moduleauthor:: Hiroyuki Sakai <hiroyuki.sakai@student.tuwien.ac.at>
"""
from django.db import models
class Image(models.Model):
"""
Stores a single image which serves as a template for :class:`.Drawing` objects .
"""
title = models.CharField(max_length=255)
"""The title of the image."""
author = models.CharField(max_length=255, blank=True)
"""Optional author of the image (e.g. Pablo Picasso)."""
url = models.URLField(blank=True)
"""Optional link to additional information."""
image = models.ImageField(upload_to='images/')
""":class:`django.db.models.ImageField` to the image on the filesystem."""
edge_image = models.ImageField(upload_to='edge_images/', blank=True)
""":class:`django.db.models.ImageField` to the edge image on the filesystem. The edge image is calculated automatically."""
dilated_edge_image = models.ImageField(upload_to='dilated_edge_images/', blank=True)
""":class:`django.db.models.ImageField` to the dilated edge image on the filesystem. The dilated edge image is used to assess the score of a drawing."""
canny_sigma = models.FloatField(default=2)
"""The sigma value for the Gaussian blur which is performed before the Canny edge detection."""
canny_low_threshold = models.FloatField(default=.1)
"""The low threshold for the Canny edge detection."""
canny_high_threshold = models.FloatField(default=.2)
"""The high threshold for the Canny edge detection."""
max_distance = models.FloatField(blank=True, null=True)
"""This is a value which is used for the score calculation."""
def __unicode__(self):
return self.title + ' (' + str(self.image) + ')'
def delete(self, *args, **kwargs):
"""Deletes the :class:`.Image` object along with its associated files."""
image_storage, image_path = self.image.storage, self.image.path
edge_image_storage, edge_image_path = self.edge_image.storage, self.edge_image.path
# Delete the model before the file
super(Image, self).delete(*args, **kwargs)
# Delete the file after the model
image_storage.delete(image_path)
edge_image_storage.delete(edge_image_path)
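        # Added note: only the image and edge-image files are removed here;
        # a generated dilated_edge_image file is left on the storage backend.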
class Track(models.Model):
"""
Stores a track which is a set of :class:`.Image` objects supposed to be drawn in succession.
"""
title = models.CharField(max_length=255)
description = models.TextField()
images = models.ManyToManyField(Image, through='TrackImage')
def __unicode__(self):
return self.title
class TrackImage(models.Model):
"""
Stores a many-to-many (n-to-n) relation between :class:`.Track` objects and :class:`.Image` objects.
"""
track = models.ForeignKey(Track)
"""The reference to the :class:`.Track` object."""
image = models.ForeignKey(Image)
"""The reference to the :class:`.Image` object."""
order = models.IntegerField()
"""The index of a :class:`.Image` object inside a :class:`.Track`."""
class Meta:
ordering = ('order',)
class Player(models.Model):
"""
Stores a player which can be associated to :class:`.TrackSession` and :class:`.Drawing` objects.
"""
name = models.CharField(max_length=255, unique=True)
"""The name of the player."""
def __unicode__(self):
return self.name
class TrackSession(models.Model):
"""
    Stores a track session, i.e. one player's run through a specific track.
"""
player = models.ForeignKey(Player, blank=True, null=True)
"""The reference to a :class:`.Player` object."""
track = models.ForeignKey(Track)
"""The reference to a :class:`.Track` object."""
score = models.FloatField()
"""The attained score."""
session_key = models.CharField(max_length=32)
"""The associated session key of the user associated."""
datetime = models.DateTimeField(auto_now_add=True)
"""The creation time of the track session."""
def __unicode__(self):
string = str(self.datetime) + ' '
if self.player:
string += str(self.player) + ': '
string += str(self.track)
return string
class Drawing(models.Model):
"""
Stores a drawing conducted by a player.
"""
player = models.ForeignKey(Player, blank=True, null=True)
"""The reference to the :class:`.Player` object who was drawn the drawing."""
image = models.ForeignKey(Image)
"""The reference to the :class:`.Image` object which is the template for the drawing."""
drawing = models.ImageField(upload_to='drawings/')
""":class:`django.db.models.ImageField` to the drawing on the filesystem."""
score_image = models.ImageField(upload_to='score_images/')
""":class:`django.db.models.ImageField` to the score image on the filesystem."""
distance = models.FloatField()
"""The distance of between the drawing and the original (edge) image."""
score = models.FloatField()
"""The attained score for the drawing."""
datetime = models.DateTimeField(auto_now=True)
"""The modification time for the drawing."""
track_session = models.ForeignKey(TrackSession, blank=True, null=True)
"""The reference to the :class:`.TrackSession` (optional)."""
track_session_index = models.IntegerField(blank=True, null=True)
"""The index of the drawing inside a :class:`.TrackSession`.."""
def __unicode__(self):
string = str(self.datetime) + ' '
if self.player:
string += str(self.player) + ': '
string += self.image.title
return string
def delete(self, *args, **kwargs):
"""Deletes the :class:`.Drawing` object along with its associated files."""
drawing_storage, drawing_path = self.drawing.storage, self.drawing.path
# Delete the model before the file
super(Drawing, self).delete(*args, **kwargs)
# Delete the file after the model
drawing_storage.delete(drawing_path)
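# A minimal usage sketch (hypothetical objects, assuming a configured Django
# app): the override above captures storage and path first, deletes the row,
# then deletes the file, so a failed file delete never orphans the row.
# Note that only the drawing file is removed here; the score_image file is
# left in storage by this override.
#
#   drawing = Drawing.objects.get(pk=1)
#   drawing.delete()  # row removed first, then drawings/<name> from storage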
|
HiroyukiSakai/Contour
|
contour/models.py
|
Python
|
gpl-3.0
| 6,772
|
[
"Gaussian"
] |
8230e1f00cd8627e5e807822c3adde9bddc00f03c125e70a516368b00b2ac603
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pandas as pd
import pytest
from bigdl.orca.test_zoo_utils import ZooTestCase
from bigdl.chronos.data.utils.public_dataset import PublicDataset
class TestPublicDataset(ZooTestCase):
def setup_method(self, method):
pass
def teardown_method(self, method):
pass
def test_init_get_dataset(self):
name = 'nyc_taxi'
path = '~/.chronos/dataset/'
        # illegal input.
with pytest.raises(AssertionError):
PublicDataset(name, path, redownload=False).get_public_data(chunk_size='1024')
def test_get_nyc_taxi(self):
name = 'nyc_taxi'
path = '~/.chronos/dataset'
if os.environ.get('FTP_URI', None):
file_url = f"{os.getenv('FTP_URI')}/analytics-zoo-data/apps/nyc-taxi/nyc_taxi.csv"
public_data = PublicDataset(name, path, redownload=False, with_split=False)
public_data.df = pd.read_csv(file_url, parse_dates=['timestamp'])
tsdata = public_data.get_tsdata(target_col='value', dt_col='timestamp')
assert set(tsdata.df.columns) == {'id', 'timestamp', 'value'}
assert tsdata.df.shape == (10320, 3)
tsdata._check_basic_invariants()
def test_get_network_traffic(self):
name = 'network_traffic'
path = '~/.chronos/dataset'
if os.environ.get('FTP_URI', None):
file_url = f"{os.getenv('FTP_URI')}/analytics-zoo-data/network-traffic/data/data.csv"
public_data = PublicDataset(name, path, redownload=False, with_split=False)
public_data.df = pd.read_csv(file_url)
public_data.df.StartTime = pd.to_datetime(public_data.df.StartTime)
public_data.df.AvgRate = public_data.df.AvgRate.apply(lambda x: float(x[:-4])
if x.endswith("Mbps")
else float(x[:-4])*1000)
tsdata = public_data.get_tsdata(target_col=['AvgRate', 'total'], dt_col='StartTime')
assert tsdata.df.shape == (8760, 5)
assert set(tsdata.df.columns) == {'StartTime', 'EndTime', 'AvgRate', 'total', 'id'}
tsdata._check_basic_invariants()
def test_get_fsi(self):
name = 'fsi'
path = '~/.chronos/dataset'
if os.environ.get('FTP_URI', None):
file_url = f"{os.getenv('FTP_URI')}/analytics-zoo-data/chronos-aiops/m_1932.csv"
public_data = PublicDataset(name, path, redownload=False, with_split=False)
public_data.df = pd.read_csv(file_url, usecols=[1, 2, 3],
names=['time_step', 'cpu_usage', 'mem_usage'])
public_data.df.sort_values(by="time_step", inplace=True)
public_data.df.reset_index(inplace=True, drop=True)
public_data.df.time_step = pd.to_datetime(public_data.df.time_step,
unit='s',
origin=pd.Timestamp('2018-01-01'))
tsdata = public_data.get_tsdata(dt_col='time_step', target_col='cpu_usage')
assert tsdata.df.shape == (61570, 4)
assert set(tsdata.df.columns) == {'time_step', 'cpu_usage', 'mem_usage', 'id'}
tsdata._check_basic_invariants()
def test_get_uci_electricity(self):
name = 'uci_electricity'
path = '~/.chronos/dataset'
if os.environ.get('FTP_URI', None):
file_url = f"{os.getenv('FTP_URI')}/analytics-zoo-data/apps/ElectricityLD/uci_electricity_data.csv"
public_data = PublicDataset(name, path, redownload=False, with_split=False)
df = pd.read_csv(file_url,
delimiter=';',
parse_dates=['Unnamed: 0'],
nrows=10000,
low_memory=False)
public_data.df = pd.melt(df,
id_vars=['Unnamed: 0'],
value_vars=df.T.index[1:])\
.rename(columns={'Unnamed: 0': 'timestamp',
'variable': 'id'})
tsdata = public_data.get_tsdata(dt_col='timestamp',
target_col='value',
id_col='id')
assert set(tsdata.df.columns) == {'id', 'timestamp', 'value'}
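# A minimal sketch of the wide-to-long reshape used in test_get_uci_electricity
# above (synthetic frame, hypothetical meter names): pd.melt keeps the
# timestamp column and turns one column per meter into (timestamp, id, value)
# rows, matching the columns asserted on the TSDataset.
wide = pd.DataFrame({'timestamp': ['2012-01-01', '2012-01-02'],
                     'MT_001': [1.0, 2.0],
                     'MT_002': [3.0, 4.0]})
tall = pd.melt(wide, id_vars=['timestamp'],
               value_vars=['MT_001', 'MT_002']).rename(columns={'variable': 'id'})
assert set(tall.columns) == {'timestamp', 'id', 'value'}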
|
intel-analytics/BigDL
|
python/chronos/test/bigdl/chronos/data/utils/test_public_dataset.py
|
Python
|
apache-2.0
| 5,094
|
[
"ORCA"
] |
03ae6060cebe331c2f500d42ac8e9e7967a37f72e14c6f829eae037583b2e552
|
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 21817 $
# Date: $Date: 2005-07-21 22:39:57 +0200 (Thu, 21 Jul 2005) $
# Copyright: This module has been placed in the public domain.
"""
Parser for Python modules. Requires Python 2.2 or higher.
The `parse_module()` function takes a module's text and file name,
runs it through the module parser (using compiler.py and tokenize.py)
and produces a parse tree of the source code, using the nodes as found
in pynodes.py. For example, given this module (x.py)::
# comment
'''Docstring'''
'''Additional docstring'''
__docformat__ = 'reStructuredText'
a = 1
'''Attribute docstring'''
class C(Super):
'''C's docstring'''
class_attribute = 1
'''class_attribute's docstring'''
def __init__(self, text=None):
'''__init__'s docstring'''
self.instance_attribute = (text * 7
+ ' whaddyaknow')
'''instance_attribute's docstring'''
def f(x, # parameter x
y=a*5, # parameter y
*args): # parameter args
'''f's docstring'''
return [x + item for item in args]
f.function_attribute = 1
'''f.function_attribute's docstring'''
The module parser will produce this module documentation tree::
<module_section filename="test data">
<docstring>
Docstring
<docstring lineno="5">
Additional docstring
<attribute lineno="7">
<object_name>
__docformat__
<expression_value lineno="7">
'reStructuredText'
<attribute lineno="9">
<object_name>
a
<expression_value lineno="9">
1
<docstring lineno="10">
Attribute docstring
<class_section lineno="12">
<object_name>
C
<class_base>
Super
<docstring lineno="12">
C's docstring
<attribute lineno="16">
<object_name>
class_attribute
<expression_value lineno="16">
1
<docstring lineno="17">
class_attribute's docstring
<method_section lineno="19">
<object_name>
__init__
<docstring lineno="19">
__init__'s docstring
<parameter_list lineno="19">
<parameter lineno="19">
<object_name>
self
<parameter lineno="19">
<object_name>
text
<parameter_default lineno="19">
None
<attribute lineno="22">
<object_name>
self.instance_attribute
<expression_value lineno="22">
(text * 7 + ' whaddyaknow')
<docstring lineno="24">
instance_attribute's docstring
<function_section lineno="27">
<object_name>
f
<docstring lineno="27">
f's docstring
<parameter_list lineno="27">
<parameter lineno="27">
<object_name>
x
<comment>
# parameter x
<parameter lineno="27">
<object_name>
y
<parameter_default lineno="27">
a * 5
<comment>
# parameter y
<parameter excess_positional="1" lineno="27">
<object_name>
args
<comment>
# parameter args
<attribute lineno="33">
<object_name>
f.function_attribute
<expression_value lineno="33">
1
<docstring lineno="34">
f.function_attribute's docstring
(Comments are not implemented yet.)
compiler.parse() provides most of what's needed for this doctree, and
"tokenize" can be used to get the rest. We can determine the line
number from the compiler.parse() AST, and the TokenParser.rhs(lineno)
method provides the rest.
The Docutils Python reader component will transform this module doctree into a
Python-specific Docutils doctree, and then a `stylist transform`_ will
further transform it into a generic doctree. Namespaces will have to be
compiled for each of the scopes, but I'm not certain at what stage of
processing.
It's very important to keep all docstring processing out of this, so that it's
completely generic and not tool-specific.
> Why perform all of those transformations? Why not go from the AST to a
> generic doctree? Or, even from the AST to the final output?
I want the docutils.readers.python.moduleparser.parse_module() function to
produce a standard documentation-oriented tree that can be used by any tool.
We can develop it together without having to compromise on the rest of our
design (i.e., HappyDoc doesn't have to be made to work like Docutils, and
vice-versa). It would be a higher-level version of what compiler.py provides.
The Python reader component transforms this generic AST into a Python-specific
doctree (it knows about modules, classes, functions, etc.), but this is
specific to Docutils and cannot be used by HappyDoc or others. The stylist
transform does the final layout, converting Python-specific structures
("class" sections, etc.) into a generic doctree using primitives (tables,
sections, lists, etc.). This generic doctree does *not* know about Python
structures any more. The advantage is that this doctree can be handed off to
any of the output writers to create any output format we like.
The latter two transforms are separate because I want to be able to have
multiple independent layout styles (multiple runtime-selectable "stylist
transforms"). Each of the existing tools (HappyDoc, pydoc, epydoc, Crystal,
etc.) has its own fixed format. I personally don't like the tables-based
format produced by these tools, and I'd like to be able to customize the
format easily. That's the goal of stylist transforms, which are independent
from the Reader component itself. One stylist transform could produce
HappyDoc-like output, another could produce output similar to module docs in
the Python library reference manual, and so on.
It's for exactly this reason:
>> It's very important to keep all docstring processing out of this, so that
>> it's completely generic and not tool-specific.
... but it goes past docstring processing. It's also important to keep style
decisions and tool-specific data transforms out of this module parser.
Issues
======
* At what point should namespaces be computed? Should they be part of the
basic AST produced by the ASTVisitor walk, or generated by another tree
traversal?
* At what point should a distinction be made between local variables &
instance attributes in __init__ methods?
* Docstrings are getting their lineno from their parents. Should the
TokenParser find the real line no's?
* Comments: include them? How and when? Only full-line comments, or
parameter comments too? (See function "f" above for an example.)
* Module could use more docstrings & refactoring in places.
"""
__docformat__ = 'reStructuredText'
import sys
import compiler
import compiler.ast
import tokenize
import token
from compiler.consts import OP_ASSIGN
from compiler.visitor import ASTVisitor
from types import StringType, UnicodeType, TupleType
from docutils.readers.python import pynodes
from docutils.nodes import Text
def parse_module(module_text, filename):
"""Return a module documentation tree from `module_text`."""
ast = compiler.parse(module_text)
token_parser = TokenParser(module_text)
visitor = ModuleVisitor(filename, token_parser)
compiler.walk(ast, visitor, walker=visitor)
return visitor.module
class BaseVisitor(ASTVisitor):
def __init__(self, token_parser):
ASTVisitor.__init__(self)
self.token_parser = token_parser
self.context = []
self.documentable = None
def default(self, node, *args):
self.documentable = None
#print 'in default (%s)' % node.__class__.__name__
#ASTVisitor.default(self, node, *args)
def default_visit(self, node, *args):
#print 'in default_visit (%s)' % node.__class__.__name__
ASTVisitor.default(self, node, *args)
class DocstringVisitor(BaseVisitor):
def visitDiscard(self, node):
if self.documentable:
self.visit(node.expr)
def visitConst(self, node):
if self.documentable:
if type(node.value) in (StringType, UnicodeType):
self.documentable.append(make_docstring(node.value, node.lineno))
else:
self.documentable = None
def visitStmt(self, node):
self.default_visit(node)
class AssignmentVisitor(DocstringVisitor):
def visitAssign(self, node):
visitor = AttributeVisitor(self.token_parser)
compiler.walk(node, visitor, walker=visitor)
if visitor.attributes:
self.context[-1].extend(visitor.attributes)
if len(visitor.attributes) == 1:
self.documentable = visitor.attributes[0]
else:
self.documentable = None
class ModuleVisitor(AssignmentVisitor):
def __init__(self, filename, token_parser):
AssignmentVisitor.__init__(self, token_parser)
self.filename = filename
self.module = None
def visitModule(self, node):
self.module = module = pynodes.module_section()
module['filename'] = self.filename
append_docstring(module, node.doc, node.lineno)
self.context.append(module)
self.documentable = module
self.visit(node.node)
self.context.pop()
def visitImport(self, node):
self.context[-1] += make_import_group(names=node.names,
lineno=node.lineno)
self.documentable = None
def visitFrom(self, node):
self.context[-1].append(
make_import_group(names=node.names, from_name=node.modname,
lineno=node.lineno))
self.documentable = None
def visitFunction(self, node):
visitor = FunctionVisitor(self.token_parser,
function_class=pynodes.function_section)
compiler.walk(node, visitor, walker=visitor)
self.context[-1].append(visitor.function)
def visitClass(self, node):
visitor = ClassVisitor(self.token_parser)
compiler.walk(node, visitor, walker=visitor)
self.context[-1].append(visitor.klass)
class AttributeVisitor(BaseVisitor):
def __init__(self, token_parser):
BaseVisitor.__init__(self, token_parser)
self.attributes = pynodes.class_attribute_section()
def visitAssign(self, node):
# Don't visit the expression itself, just the attribute nodes:
for child in node.nodes:
self.dispatch(child)
expression_text = self.token_parser.rhs(node.lineno)
expression = pynodes.expression_value()
expression.append(Text(expression_text))
for attribute in self.attributes:
attribute.append(expression)
def visitAssName(self, node):
self.attributes.append(make_attribute(node.name,
lineno=node.lineno))
def visitAssTuple(self, node):
attributes = self.attributes
self.attributes = []
self.default_visit(node)
n = pynodes.attribute_tuple()
n.extend(self.attributes)
n['lineno'] = self.attributes[0]['lineno']
attributes.append(n)
self.attributes = attributes
#self.attributes.append(att_tuple)
def visitAssAttr(self, node):
self.default_visit(node, node.attrname)
def visitGetattr(self, node, suffix):
self.default_visit(node, node.attrname + '.' + suffix)
def visitName(self, node, suffix):
self.attributes.append(make_attribute(node.name + '.' + suffix,
lineno=node.lineno))
class FunctionVisitor(DocstringVisitor):
in_function = 0
def __init__(self, token_parser, function_class):
DocstringVisitor.__init__(self, token_parser)
self.function_class = function_class
def visitFunction(self, node):
if self.in_function:
self.documentable = None
# Don't bother with nested function definitions.
return
self.in_function = 1
self.function = function = make_function_like_section(
name=node.name,
lineno=node.lineno,
doc=node.doc,
function_class=self.function_class)
self.context.append(function)
self.documentable = function
self.parse_parameter_list(node)
self.visit(node.code)
self.context.pop()
def parse_parameter_list(self, node):
parameters = []
special = []
argnames = list(node.argnames)
if node.kwargs:
special.append(make_parameter(argnames[-1], excess_keyword=1))
argnames.pop()
if node.varargs:
special.append(make_parameter(argnames[-1],
excess_positional=1))
argnames.pop()
defaults = list(node.defaults)
defaults = [None] * (len(argnames) - len(defaults)) + defaults
function_parameters = self.token_parser.function_parameters(
node.lineno)
#print >>sys.stderr, function_parameters
for argname, default in zip(argnames, defaults):
if type(argname) is TupleType:
parameter = pynodes.parameter_tuple()
for tuplearg in argname:
parameter.append(make_parameter(tuplearg))
argname = normalize_parameter_name(argname)
else:
parameter = make_parameter(argname)
if default:
n_default = pynodes.parameter_default()
n_default.append(Text(function_parameters[argname]))
parameter.append(n_default)
parameters.append(parameter)
if parameters or special:
special.reverse()
parameters.extend(special)
parameter_list = pynodes.parameter_list()
parameter_list.extend(parameters)
self.function.append(parameter_list)
class ClassVisitor(AssignmentVisitor):
in_class = 0
def __init__(self, token_parser):
AssignmentVisitor.__init__(self, token_parser)
self.bases = []
def visitClass(self, node):
if self.in_class:
self.documentable = None
# Don't bother with nested class definitions.
return
self.in_class = 1
#import mypdb as pdb
#pdb.set_trace()
for base in node.bases:
self.visit(base)
self.klass = klass = make_class_section(node.name, self.bases,
doc=node.doc,
lineno=node.lineno)
self.context.append(klass)
self.documentable = klass
self.visit(node.code)
self.context.pop()
def visitGetattr(self, node, suffix=None):
if suffix:
name = node.attrname + '.' + suffix
else:
name = node.attrname
self.default_visit(node, name)
def visitName(self, node, suffix=None):
if suffix:
name = node.name + '.' + suffix
else:
name = node.name
self.bases.append(name)
def visitFunction(self, node):
if node.name == '__init__':
visitor = InitMethodVisitor(self.token_parser,
function_class=pynodes.method_section)
compiler.walk(node, visitor, walker=visitor)
else:
visitor = FunctionVisitor(self.token_parser,
function_class=pynodes.method_section)
compiler.walk(node, visitor, walker=visitor)
self.context[-1].append(visitor.function)
class InitMethodVisitor(FunctionVisitor, AssignmentVisitor): pass
class TokenParser:
def __init__(self, text):
self.text = text + '\n\n'
self.lines = self.text.splitlines(1)
self.generator = tokenize.generate_tokens(iter(self.lines).next)
self.next()
def __iter__(self):
return self
def next(self):
self.token = self.generator.next()
self.type, self.string, self.start, self.end, self.line = self.token
return self.token
def goto_line(self, lineno):
while self.start[0] < lineno:
self.next()
        return self.token
def rhs(self, lineno):
"""
Return a whitespace-normalized expression string from the right-hand
side of an assignment at line `lineno`.
"""
self.goto_line(lineno)
while self.string != '=':
self.next()
self.stack = None
while self.type != token.NEWLINE and self.string != ';':
if self.string == '=' and not self.stack:
self.tokens = []
self.stack = []
self._type = None
self._string = None
self._backquote = 0
else:
self.note_token()
self.next()
self.next()
text = ''.join(self.tokens)
return text.strip()
closers = {')': '(', ']': '[', '}': '{'}
openers = {'(': 1, '[': 1, '{': 1}
del_ws_prefix = {'.': 1, '=': 1, ')': 1, ']': 1, '}': 1, ':': 1, ',': 1}
no_ws_suffix = {'.': 1, '=': 1, '(': 1, '[': 1, '{': 1}
def note_token(self):
if self.type == tokenize.NL:
return
del_ws = self.del_ws_prefix.has_key(self.string)
append_ws = not self.no_ws_suffix.has_key(self.string)
if self.openers.has_key(self.string):
self.stack.append(self.string)
if (self._type == token.NAME
or self.closers.has_key(self._string)):
del_ws = 1
elif self.closers.has_key(self.string):
assert self.stack[-1] == self.closers[self.string]
self.stack.pop()
elif self.string == '`':
if self._backquote:
del_ws = 1
assert self.stack[-1] == '`'
self.stack.pop()
else:
append_ws = 0
self.stack.append('`')
self._backquote = not self._backquote
if del_ws and self.tokens and self.tokens[-1] == ' ':
del self.tokens[-1]
self.tokens.append(self.string)
self._type = self.type
self._string = self.string
if append_ws:
self.tokens.append(' ')
def function_parameters(self, lineno):
"""
Return a dictionary mapping parameters to defaults
(whitespace-normalized strings).
"""
self.goto_line(lineno)
while self.string != 'def':
self.next()
while self.string != '(':
self.next()
name = None
default = None
parameter_tuple = None
self.tokens = []
parameters = {}
self.stack = [self.string]
self.next()
while 1:
if len(self.stack) == 1:
if parameter_tuple:
# Just encountered ")".
#print >>sys.stderr, 'parameter_tuple: %r' % self.tokens
name = ''.join(self.tokens).strip()
self.tokens = []
parameter_tuple = None
if self.string in (')', ','):
if name:
if self.tokens:
default_text = ''.join(self.tokens).strip()
else:
default_text = None
parameters[name] = default_text
self.tokens = []
name = None
default = None
if self.string == ')':
break
elif self.type == token.NAME:
if name and default:
self.note_token()
else:
assert name is None, (
'token=%r name=%r parameters=%r stack=%r'
% (self.token, name, parameters, self.stack))
name = self.string
#print >>sys.stderr, 'name=%r' % name
elif self.string == '=':
assert name is not None, 'token=%r' % (self.token,)
assert default is None, 'token=%r' % (self.token,)
assert self.tokens == [], 'token=%r' % (self.token,)
default = 1
self._type = None
self._string = None
self._backquote = 0
elif name:
self.note_token()
elif self.string == '(':
parameter_tuple = 1
self._type = None
self._string = None
self._backquote = 0
self.note_token()
else: # ignore these tokens:
assert (self.string in ('*', '**', '\n')
or self.type == tokenize.COMMENT), (
'token=%r' % (self.token,))
else:
self.note_token()
self.next()
return parameters
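# A minimal sketch of TokenParser.rhs (illustrative, Python 2 era): the
# right-hand side of an assignment is re-assembled from its tokens with
# whitespace normalized by note_token.
#
# >>> TokenParser('x = 1 +  2\n').rhs(1)
# '1 + 2'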
def make_docstring(doc, lineno):
n = pynodes.docstring()
if lineno:
# Really, only module docstrings don't have a line
# (@@: but maybe they should)
n['lineno'] = lineno
n.append(Text(doc))
return n
def append_docstring(node, doc, lineno):
if doc:
node.append(make_docstring(doc, lineno))
def make_class_section(name, bases, lineno, doc):
n = pynodes.class_section()
n['lineno'] = lineno
n.append(make_object_name(name))
for base in bases:
b = pynodes.class_base()
b.append(make_object_name(base))
n.append(b)
append_docstring(n, doc, lineno)
return n
def make_object_name(name):
n = pynodes.object_name()
n.append(Text(name))
return n
def make_function_like_section(name, lineno, doc, function_class):
n = function_class()
n['lineno'] = lineno
n.append(make_object_name(name))
append_docstring(n, doc, lineno)
return n
def make_import_group(names, lineno, from_name=None):
n = pynodes.import_group()
n['lineno'] = lineno
if from_name:
n_from = pynodes.import_from()
n_from.append(Text(from_name))
n.append(n_from)
for name, alias in names:
n_name = pynodes.import_name()
n_name.append(Text(name))
if alias:
n_alias = pynodes.import_alias()
n_alias.append(Text(alias))
n_name.append(n_alias)
n.append(n_name)
return n
def make_class_attribute(name, lineno):
n = pynodes.class_attribute()
n['lineno'] = lineno
n.append(Text(name))
return n
def make_attribute(name, lineno):
n = pynodes.attribute()
n['lineno'] = lineno
n.append(make_object_name(name))
return n
def make_parameter(name, excess_keyword=0, excess_positional=0):
"""
    excess_keyword and excess_positional must be either 1 or 0, and
    they cannot both be 1.
"""
n = pynodes.parameter()
n.append(make_object_name(name))
assert not excess_keyword or not excess_positional
if excess_keyword:
n['excess_keyword'] = 1
if excess_positional:
n['excess_positional'] = 1
return n
def trim_docstring(text):
"""
Trim indentation and blank lines from docstring text & return it.
See PEP 257.
"""
if not text:
return text
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = text.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxint
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxint:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
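# A minimal sketch of trim_docstring (PEP 257): surrounding blank lines are
# dropped and the common indentation of continuation lines is stripped.
#
# >>> trim_docstring('Summary.\n\n    indented body\n')
# 'Summary.\n\nindented body'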
def normalize_parameter_name(name):
"""
Converts a tuple like ``('a', ('b', 'c'), 'd')`` into ``'(a, (b, c), d)'``
"""
if type(name) is TupleType:
return '(%s)' % ', '.join([normalize_parameter_name(n) for n in name])
else:
return name
if __name__ == '__main__':
import sys
args = sys.argv[1:]
if args[0] == '-v':
filename = args[1]
module_text = open(filename).read()
ast = compiler.parse(module_text)
visitor = compiler.visitor.ExampleASTVisitor()
compiler.walk(ast, visitor, walker=visitor, verbose=1)
else:
filename = args[0]
content = open(filename).read()
print parse_module(content, filename).pformat()
|
baoboa/Crystal-Space
|
docs/support/docutils/readers/python/moduleparser.py
|
Python
|
lgpl-2.1
| 25,839
|
[
"CRYSTAL",
"VisIt"
] |
1e29e147e33973ba1240f66082a146afa1d1327a768b983a8dd379b3c99e6c25
|
import pytest
import ast
import symtab
import hpcs_builtins
from annotators import *
from typing import *
@pytest.fixture
def annotator():
return TypeAnnotator()
class TestTypeAnnotation:
def setup_method(self, method):
self.annotator = TypeAnnotator()
self.ast = ast.parse(method.__doc__)
self.ast = self.annotator.visit(self.ast)
def testModuleScopeSingleAssignment(self):
"""
a = 2
"""
assert self.ast.body[0].targets[0].typ == Int8
assert self.ast.scope.find_symbol("a").typ == Int8
def testFunctionScopeSingleAssignment(self):
"""
def test1(a : Int8):
b = a
"""
assert self.ast.body[0].body[0].targets[0].typ == Int8
assert self.ast.body[0].body[0].scope.find_symbol("a").typ == Int8
assert self.ast.body[0].scope.find_symbol("a").typ == Int8
def testBinaryOperationInt8(self):
"""
def test1():
a = 5
b = 67
c = a + b
"""
assert self.ast.body[0].body[2].targets[0].typ == Int8
assert self.ast.body[0].scope.find_symbol("a").typ == Int8
assert self.ast.body[0].scope.find_symbol("b").typ == Int8
assert self.ast.body[0].scope.find_symbol("c").typ == Int8
def testBinaryOperationInt16(self):
"""
def test1():
a = 5
b = 62000
c = a + b
"""
assert self.ast.body[0].body[2].targets[0].typ == Int16
assert self.ast.body[0].scope.find_symbol("a").typ == Int8
assert self.ast.body[0].scope.find_symbol("b").typ == Int16
assert self.ast.body[0].scope.find_symbol("c").typ == Int16
def testBinaryComparison(self):
"""
a = 34 < 59
"""
assert self.ast.scope.find_symbol("a").typ == Bool()
def testStringAssignment(self):
"""
s = "Some string"
"""
assert self.ast.body[0].scope.find_symbol("s").typ == String()
def testCharacterFromString(self):
"""
s = "A string"
c = s[3]
"""
assert self.ast.body[0].scope.find_symbol("c").typ == Int8
def testCallTypeAnnotation(self):
"""
def return_int8(a : Int8) -> Int8:
return 2*a
b = return_int8(3)
"""
assert self.ast.body[0].scope.find_symbol("b").typ == Int8
class TestCustomTypeAnnotation:
def setup_method(self, method):
self.ast = ast.parse(method.__doc__)
def testBuiltins(self):
"""
a = max(53, 45)
"""
builtins = symtab.SymbolTable(None)
builtins.add_symbol(symtab.Symbol("max",
Function(Int32, [Int32, Int32])))
annotator = TypeAnnotator(builtins)
self.ast = annotator.visit(self.ast)
assert self.ast.body[0].scope.find_symbol("a").typ == Int32
class TestArray:
def setup_method(self, method):
self.annotator = TypeAnnotator(hpcs_builtins.create_builtin_scope())
self.ast = ast.parse(method.__doc__)
self.ast = self.annotator.visit(self.ast)
def testPointerAccess(self):
"""
a = PlacedInt8Array(100, 0x0)
a[3] = 42
"""
assert self.ast.body[1].targets[0].typ == Int8
assert self.ast.body[0].scope.find_symbol("a").typ == Pointer(Int8)
class TestFailingTypeAnnotation:
def testUnknownSymbol(self):
annotator = TypeAnnotator()
an_ast = ast.parse("a = b")
with pytest.raises(KeyError):
an_ast = annotator.visit(an_ast)
def testIllegalAssignmentToArrayElement(self):
annotator = TypeAnnotator(hpcs_builtins.create_builtin_scope())
an_ast = ast.parse("""
a = PlacedInt8Array(100, 0x0)
a[3] = "SomeString"
""")
with pytest.raises(TypeError):
an_ast = annotator.visit(an_ast)
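# A minimal sketch of the docstring-as-fixture idiom used above (hypothetical
# class name, standard library only): each test keeps its source snippet in
# its own docstring, and setup_method parses it into a fresh AST per test.
import textwrap

class TestDocstringFixtureSketch:
    def setup_method(self, method):
        # dedent first: the docstring body is indented inside the class
        self.tree = ast.parse(textwrap.dedent(method.__doc__))

    def test_single_assignment(self):
        """
        a = 2
        """
        assert isinstance(self.tree.body[0], ast.Assign)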
|
Jokymon/hpcs
|
test/test_typeannotation.py
|
Python
|
gpl-3.0
| 3,762
|
[
"VisIt"
] |
0dd1aa54531c61acd14e0636592acd41ea9d524b73054ca1d696e4a0771beb31
|
from Kernel import Kernel
from Gaussian import Gaussian
|
aerialhedgehog/VyPy
|
trunk/VyPy/regression/gpr/kernel/__init__.py
|
Python
|
bsd-3-clause
| 58
|
[
"Gaussian"
] |
07e3136878159303e0ca31f6b38a19398ed96ab8351b4dd264e903569d8f4505
|
# $HeadURL$
""" ElementStatus base class for the helpers
Each RSS Status helper extends this class, providing four methods per cache.
* get<elementType>Statuses
* get<elementType>Status
* isUsable<elementType>
* getUsable<elementType>
"""
# DIRAC
from DIRAC import gLogger, S_ERROR, S_OK
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
__RCSID__ = '$Id: $'
class ElementStatus( object ):
"""
ElementStatus class used by SiteStatus, ResourceStatus and NodeStatus helpers
in the RSS clients. This base class will query the get<elementType>Statuses
  method provided by the child classes, and build its responses from its output.
"""
def __init__( self ):
"""
Constructor. Initializes logger and ResourceStatusClient.
"""
self.log = gLogger.getSubLogger( self.__class__.__name__ )
self.rssClient = ResourceStatusClient()
def getElementStatuses( self, elementType, elementNames, statusTypes ):
"""
Method that gets from the extended class the get<elementType>Statuses method
and runs it. This method always requires two parameters, elementNames and
statusTypes. Returns its output.
:Parameters:
**elementType** - `string`
name of the elementType of the cache ( Site, ComputingElement,... ) used
to query get<elementType>Statuses
**elementNames** - [ None, `string`, `list` ]
name(s) of the elements to be matched
**statusTypes** - [ None, `string`, `list` ]
name(s) of the statusTypes to be matched
:return: S_OK() || S_ERROR()
"""
try:
result = getattr( self, 'get%sStatuses' % elementType )( elementNames, statusTypes )
except AttributeError:
return S_ERROR( "Error calling get%sStatuses" % elementType )
return result
def getElementStatus( self, elementType, elementName, statusType ):
"""
Given a elementName and a statusType, it returns its status from the cache
corresponding to elementType.
:Parameters:
**elementType** - `string`
name of the elementType of the cache ( Site, ComputingElement,... ) used
to query get<elementType>Statuses
**elementName** - `string`
name of the element to be matched
**statusType** - `string`
name of the statusType to be matched
:return: S_OK() || S_ERROR()
"""
if not isinstance( elementName, str ):
self.log.error( "getElementStatus expects str for elementName" )
return S_ERROR( "getElementStatus expects str for elementName" )
if not isinstance( statusType, str ):
self.log.error( "getElementStatus expects str for statusType" )
return S_ERROR( "getElementStatus expects str for statusType" )
elementStatus = self.getElementStatuses( elementType, elementName, statusType )
if not elementStatus[ 'OK' ]:
self.log.error( elementStatus[ 'Message' ] )
return elementStatus
return S_OK( elementStatus[ 'Value' ][ elementName ][ statusType ] )
def isUsableElement( self, elementType, elementName, statusType ):
"""
Similar method to getElementStatus. The difference is the output.
    Given an element name, returns a bool telling whether the element is usable:
    a status of Active or Degraded yields True,
    anything else yields False.
:Parameters:
**elementType** - `string`
name of the elementType of the cache ( Site, ComputingElement,... ) used
to query get<elementType>Statuses
      **elementName** - `string`
        name of the element to be matched
**statusType** - `string`
name of the statusType to be matched
:return: S_OK() || S_ERROR()
"""
self.log.debug( ( elementName, statusType ) )
elementStatus = self.getElementStatus( elementType, elementName, statusType )
if not elementStatus[ 'OK' ]:
self.log.error( elementStatus[ 'Message' ] )
return False
if elementStatus[ 'Value' ] in ( 'Active', 'Degraded' ):
self.log.debug( 'IsUsable' )
return True
self.log.debug( 'Is NOT Usable' )
return False
def getUsableElements( self, elementType, statusType ):
"""
For a given statusType, returns all elements that are usable: their status
for that particular statusType is either Active or Degraded; in a list.
:Parameters:
**elementType** - `string`
name of the elementType of the cache ( Site, ComputingElement,... ) used
to query get<elementType>Statuses
**statusType** - `string`
name of the statusType to be matched
:return: S_OK() || S_ERROR()
"""
if not isinstance( statusType, str ):
self.log.error( "getUsableElements expects str for statusType" )
return S_ERROR( "getUsableElements expects str for statusType" )
elementStatuses = self.getElementStatuses( elementType, None, statusType )
if not elementStatuses[ 'OK' ]:
self.log.error( elementStatuses )
return elementStatuses
elementStatuses = elementStatuses[ 'Value' ]
self.log.debug( elementStatuses )
usableElements = []
for elementName, statusDict in elementStatuses.items():
if statusDict[ statusType ] in ( 'Active', 'Degraded' ):
usableElements.append( elementName )
return S_OK( usableElements )
def getUnusableElements( self, elementType, statusType ):
"""
For a given statusType, returns all elements that are unusable: their status
    for that particular statusType is Banned, Probing, Error or Unknown; in a list.
:Parameters:
**elementType** - `string`
name of the elementType of the cache ( Site, ComputingElement,... ) used
to query get<elementType>Statuses
**statusType** - `string`
name of the statusType to be matched
:return: S_OK() || S_ERROR()
"""
if not isinstance( statusType, str ):
self.log.error( "getUnusableElements expects str for statusType" )
return S_ERROR( "getUnusableElements expects str for statusType" )
elementStatuses = self.getElementStatuses( elementType, None, statusType )
if not elementStatuses[ 'OK' ]:
self.log.error( elementStatuses )
return elementStatuses
elementStatuses = elementStatuses[ 'Value' ]
self.log.debug( elementStatuses )
unusableElements = []
for elementName, statusDict in elementStatuses.items():
if statusDict[ statusType ] in ( 'Banned', 'Probing', 'Error', 'Unknown' ):
unusableElements.append( elementName )
return S_OK( unusableElements )
#...............................................................................
@staticmethod
def getCacheDictFromRawData( rawList ):
"""
    Formats the raw data list, which we know must contain three-element tuples
    ( element1, element2, element3 ), into a list of tuples with the format
    ( ( element1, element2 ), element3 ). Then, it is converted to a dictionary,
    which will be the new Cache.
It happens that element1 is elementName, element2 is statusType and element3
is status.
:Parameters:
**rawList** - `list`
list of three element tuples [( element1, element2, element3 ),... ]
:return: dict of the form { ( elementName, statusType ) : status, ... }
"""
res = [ ( ( name, sType ), status ) for name, sType, status in rawList ]
return dict( res )
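# A minimal sketch of the extension contract described above (hypothetical
# helper, not part of DIRAC): a child class only has to provide a
# get<elementType>Statuses method; the base class reaches it via getattr.
class SiteStatusSketch( ElementStatus ):
  def getSiteStatuses( self, elementNames, statusTypes ):
    # a real helper would consult the RSS cache; canned data for illustration
    return S_OK( { 'LCG.CERN.ch' : { 'ComputingAccess' : 'Active' } } )
# With it, getElementStatus( 'Site', 'LCG.CERN.ch', 'ComputingAccess' ) returns
# S_OK( 'Active' ), and isUsableElement( ... ) returns True.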
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
avedaee/DIRAC
|
ResourceStatusSystem/Utilities/ElementStatus.py
|
Python
|
gpl-3.0
| 7,799
|
[
"DIRAC"
] |
72f4d985cfc33a95af18e29bed244238e9ff9a49604ab44cff8cb977e5d80c51
|
# Copyright: (c) 2017, Brian Coca <bcoca@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import argparse
from operator import attrgetter
from ansible import constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.inventory.host import Host
from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.loader import vars_loader
from ansible.utils.vars import combine_vars
from ansible.utils.display import Display
from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
display = Display()
INTERNAL_VARS = frozenset(['ansible_diff_mode',
'ansible_config_file',
'ansible_facts',
'ansible_forks',
'ansible_inventory_sources',
'ansible_limit',
'ansible_playbook_python',
'ansible_run_tags',
'ansible_skip_tags',
'ansible_verbosity',
'ansible_version',
'inventory_dir',
'inventory_file',
'inventory_hostname',
'inventory_hostname_short',
'groups',
'group_names',
'omit',
'playbook_dir', ])
class InventoryCLI(CLI):
''' used to display or dump the configured inventory as Ansible sees it '''
ARGUMENTS = {'host': 'The name of a host to match in the inventory, relevant when using --list',
'group': 'The name of a group in the inventory, relevant when using --graph', }
def __init__(self, args):
super(InventoryCLI, self).__init__(args)
self.vm = None
self.loader = None
self.inventory = None
def init_parser(self):
super(InventoryCLI, self).init_parser(
usage='usage: %prog [options] [host|group]',
epilog='Show Ansible inventory information, by default it uses the inventory script JSON format')
opt_help.add_inventory_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_basedir_options(self.parser)
# remove unused default options
self.parser.add_argument('-l', '--limit', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument, nargs='?')
self.parser.add_argument('--list-hosts', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument)
self.parser.add_argument('args', metavar='host|group', nargs='?')
# Actions
        action_group = self.parser.add_argument_group("Actions", "One of the following must be used on invocation, ONLY ONE!")
action_group.add_argument("--list", action="store_true", default=False, dest='list', help='Output all hosts info, works as inventory script')
action_group.add_argument("--host", action="store", default=None, dest='host', help='Output specific host info, works as inventory script')
action_group.add_argument("--graph", action="store_true", default=False, dest='graph',
help='create inventory graph, if supplying pattern it must be a valid group name')
self.parser.add_argument_group(action_group)
# graph
self.parser.add_argument("-y", "--yaml", action="store_true", default=False, dest='yaml',
help='Use YAML format instead of default JSON, ignored for --graph')
self.parser.add_argument('--toml', action='store_true', default=False, dest='toml',
help='Use TOML format instead of default JSON, ignored for --graph')
self.parser.add_argument("--vars", action="store_true", default=False, dest='show_vars',
help='Add vars to graph display, ignored unless used with --graph')
# list
self.parser.add_argument("--export", action="store_true", default=C.INVENTORY_EXPORT, dest='export',
help="When doing an --list, represent in a way that is optimized for export,"
"not as an accurate representation of how Ansible has processed it")
self.parser.add_argument('--output', default=None, dest='output_file',
help="When doing --list, send the inventory to a file instead of to the screen")
# self.parser.add_argument("--ignore-vars-plugins", action="store_true", default=False, dest='ignore_vars_plugins',
# help="When doing an --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")
def post_process_args(self, options):
options = super(InventoryCLI, self).post_process_args(options)
display.verbosity = options.verbosity
self.validate_conflicts(options)
# there can be only one! and, at least, one!
used = 0
for opt in (options.list, options.host, options.graph):
if opt:
used += 1
if used == 0:
raise AnsibleOptionsError("No action selected, at least one of --host, --graph or --list needs to be specified.")
elif used > 1:
raise AnsibleOptionsError("Conflicting options used, only one of --host, --graph or --list can be used at the same time.")
# set host pattern to default if not supplied
if options.args:
options.pattern = options.args
else:
options.pattern = 'all'
return options
def run(self):
super(InventoryCLI, self).run()
# Initialize needed objects
self.loader, self.inventory, self.vm = self._play_prereqs()
results = None
if context.CLIARGS['host']:
hosts = self.inventory.get_hosts(context.CLIARGS['host'])
if len(hosts) != 1:
raise AnsibleOptionsError("You must pass a single valid host to --host parameter")
myvars = self._get_host_variables(host=hosts[0])
# FIXME: should we template first?
results = self.dump(myvars)
elif context.CLIARGS['graph']:
results = self.inventory_graph()
elif context.CLIARGS['list']:
top = self._get_group('all')
if context.CLIARGS['yaml']:
results = self.yaml_inventory(top)
elif context.CLIARGS['toml']:
results = self.toml_inventory(top)
else:
results = self.json_inventory(top)
results = self.dump(results)
if results:
outfile = context.CLIARGS['output_file']
if outfile is None:
# FIXME: pager?
display.display(results)
else:
try:
with open(to_bytes(outfile), 'wt') as f:
f.write(results)
except (OSError, IOError) as e:
raise AnsibleError('Unable to write to destination file (%s): %s' % (to_native(outfile), to_native(e)))
exit(0)
exit(1)
@staticmethod
def dump(stuff):
if context.CLIARGS['yaml']:
import yaml
from ansible.parsing.yaml.dumper import AnsibleDumper
results = yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False)
elif context.CLIARGS['toml']:
from ansible.plugins.inventory.toml import toml_dumps, HAS_TOML
if not HAS_TOML:
raise AnsibleError(
'The python "toml" library is required when using the TOML output format'
)
results = toml_dumps(stuff)
else:
import json
from ansible.parsing.ajson import AnsibleJSONEncoder
results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=True, indent=4, preprocess_unsafe=True)
return results
def _get_group_variables(self, group):
# get info from inventory source
res = group.get_vars()
# Always load vars plugins
res = combine_vars(res, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [group], 'all'))
if context.CLIARGS['basedir']:
res = combine_vars(res, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [group], 'all'))
if group.priority != 1:
res['ansible_group_priority'] = group.priority
return self._remove_internal(res)
def _get_host_variables(self, host):
if context.CLIARGS['export']:
            # only get vars defined directly on the host
hostvars = host.get_vars()
# Always load vars plugins
hostvars = combine_vars(hostvars, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [host], 'all'))
if context.CLIARGS['basedir']:
hostvars = combine_vars(hostvars, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [host], 'all'))
else:
# get all vars flattened by host, but skip magic hostvars
hostvars = self.vm.get_vars(host=host, include_hostvars=False, stage='all')
return self._remove_internal(hostvars)
def _get_group(self, gname):
group = self.inventory.groups.get(gname)
return group
@staticmethod
def _remove_internal(dump):
for internal in INTERNAL_VARS:
if internal in dump:
del dump[internal]
return dump
@staticmethod
def _remove_empty(dump):
# remove empty keys
for x in ('hosts', 'vars', 'children'):
if x in dump and not dump[x]:
del dump[x]
@staticmethod
def _show_vars(dump, depth):
result = []
if context.CLIARGS['show_vars']:
for (name, val) in sorted(dump.items()):
result.append(InventoryCLI._graph_name('{%s = %s}' % (name, val), depth))
return result
@staticmethod
def _graph_name(name, depth=0):
if depth:
name = " |" * (depth) + "--%s" % name
return name
def _graph_group(self, group, depth=0):
result = [self._graph_name('@%s:' % group.name, depth)]
depth = depth + 1
for kid in sorted(group.child_groups, key=attrgetter('name')):
result.extend(self._graph_group(kid, depth))
if group.name != 'all':
for host in sorted(group.hosts, key=attrgetter('name')):
result.append(self._graph_name(host.name, depth))
result.extend(self._show_vars(self._get_host_variables(host), depth + 1))
result.extend(self._show_vars(self._get_group_variables(group), depth))
return result
def inventory_graph(self):
start_at = self._get_group(context.CLIARGS['pattern'])
if start_at:
return '\n'.join(self._graph_group(start_at))
else:
raise AnsibleOptionsError("Pattern must be valid group name when using --graph")
def json_inventory(self, top):
seen = set()
def format_group(group):
results = {}
results[group.name] = {}
if group.name != 'all':
results[group.name]['hosts'] = [h.name for h in sorted(group.hosts, key=attrgetter('name'))]
results[group.name]['children'] = []
for subgroup in sorted(group.child_groups, key=attrgetter('name')):
results[group.name]['children'].append(subgroup.name)
if subgroup.name not in seen:
results.update(format_group(subgroup))
seen.add(subgroup.name)
if context.CLIARGS['export']:
results[group.name]['vars'] = self._get_group_variables(group)
self._remove_empty(results[group.name])
if not results[group.name]:
del results[group.name]
return results
results = format_group(top)
# populate meta
results['_meta'] = {'hostvars': {}}
hosts = self.inventory.get_hosts()
for host in hosts:
hvars = self._get_host_variables(host)
if hvars:
results['_meta']['hostvars'][host.name] = hvars
return results
def yaml_inventory(self, top):
seen = []
def format_group(group):
results = {}
# initialize group + vars
results[group.name] = {}
# subgroups
results[group.name]['children'] = {}
for subgroup in sorted(group.child_groups, key=attrgetter('name')):
if subgroup.name != 'all':
results[group.name]['children'].update(format_group(subgroup))
# hosts for group
results[group.name]['hosts'] = {}
if group.name != 'all':
for h in sorted(group.hosts, key=attrgetter('name')):
myvars = {}
if h.name not in seen: # avoid defining host vars more than once
seen.append(h.name)
myvars = self._get_host_variables(host=h)
results[group.name]['hosts'][h.name] = myvars
if context.CLIARGS['export']:
gvars = self._get_group_variables(group)
if gvars:
results[group.name]['vars'] = gvars
self._remove_empty(results[group.name])
return results
return format_group(top)
def toml_inventory(self, top):
seen = set()
has_ungrouped = bool(next(g.hosts for g in top.child_groups if g.name == 'ungrouped'))
def format_group(group):
results = {}
results[group.name] = {}
results[group.name]['children'] = []
for subgroup in sorted(group.child_groups, key=attrgetter('name')):
if subgroup.name == 'ungrouped' and not has_ungrouped:
continue
if group.name != 'all':
results[group.name]['children'].append(subgroup.name)
results.update(format_group(subgroup))
if group.name != 'all':
for host in sorted(group.hosts, key=attrgetter('name')):
if host.name not in seen:
seen.add(host.name)
host_vars = self._get_host_variables(host=host)
else:
host_vars = {}
try:
results[group.name]['hosts'][host.name] = host_vars
except KeyError:
results[group.name]['hosts'] = {host.name: host_vars}
if context.CLIARGS['export']:
results[group.name]['vars'] = self._get_group_variables(group)
self._remove_empty(results[group.name])
if not results[group.name]:
del results[group.name]
return results
results = format_group(top)
return results
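# A minimal usage sketch (shell commands shown as comments): the three
# mutually exclusive actions wired up in init_parser/post_process_args above.
#
#   ansible-inventory -i hosts --list            # full dump as inventory-script JSON
#   ansible-inventory -i hosts --host web01      # vars for a single host
#   ansible-inventory -i hosts --graph --vars    # indented group tree with vars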
|
roadmapper/ansible
|
lib/ansible/cli/inventory.py
|
Python
|
gpl-3.0
| 15,528
|
[
"Brian"
] |
edd17f7cce1f6dc9067a57ea40896aaee2ced6cd96e26e33cfa068ab34ff4554
|
''' http://www.biopython.org/DIST/docs/api/Bio.KDTree.KDTree%27-module.html '''
def photoz(indices):
import sys, pyfits, os
file = os.environ['sne'] + '/cosmos/cosmos_zphot_mag25.nums.fits'
hdulist = pyfits.open(file)
table = hdulist["OBJECTS"].data
r = []
    for i in indices[0]:
r.append(table.field('zp_best')[i])
print r
import pylab, scipy
a = scipy.array(r)
a, b, varp = pylab.hist(a,bins=scipy.arange(0,4,0.05))
pylab.xlabel("Z")
pylab.ylabel("Number of Galaxies")
pylab.show()
raw_input()
return
def tree(length):
import sys, pyfits, os
#caltable = '/tmp/' + cluster + 'output.cat' #sys.argv[1]
#print cluster, caltable
#hdulist = pyfits.open(caltable)
#table = hdulist["OBJECTS"].data
from scipy.spatial import KDTree
file = os.environ['sne'] + '/cosmos/cosmos_zphot_mag25.nums.fits'
hdulist = pyfits.open(file)
table = hdulist["OBJECTS"].data
array = []
cols = []
for filter in ['umag','bmag','vmag','gmag','rmag','imag','zmag']: #,'icmag','jmag','kmag']:
print hdulist['OBJECTS'].columns
for column in hdulist['OBJECTS'].columns:
if filter == column.name:
print column.format
cols.append(pyfits.Column(name=filter,format=column.format,array=hdulist['OBJECTS'].data.field(filter)[0:length]))
print cols
tbhdu=pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
import scipy
p = scipy.array([[tbhdu.data[2200][i] for i in range(7)]])
print p
#return KDTree(p)
hdu = pyfits.PrimaryHDU()
thdulist = pyfits.HDUList([hdu,tbhdu])
#os.system('rm temp.fits')
#thdulist.writeto('temp.fits')
import numpy
sarray = numpy.asarray(tbhdu.data.tolist())
print numpy.shape(sarray)
a = KDTree(sarray)
return a
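# A minimal sketch of the nearest-neighbour query the tree above enables
# (synthetic data; the 7 columns mirror the seven magnitudes loaded above):
#
#   import numpy
#   from scipy.spatial import KDTree
#   pts = numpy.random.rand(100, 7)       # 100 objects, 7 magnitudes each
#   kd = KDTree(pts)
#   dist, idx = kd.query(pts[0], k=5)     # 5 closest objects in colour space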
|
deapplegate/wtgpipeline
|
non_essentials/kdtree/kdtree2.py
|
Python
|
mit
| 1,929
|
[
"Biopython"
] |
af4377ad39855892f70c5e3380988e0d23c0669ab0541f98200313e8bb97c0f2
|
"""Module to test the output of the Mirth Connect channels
NB - this module does NOT provoke actual processing of the test set,
but relies on a persisted copy of the post process database to be
available. To create said file - run::
process_testfiles_via_mirth
(and go get some coffee while you wait...)
The test files should not contain ANY sensitive data. All test files
should either be hand generated from test data, or scrubbed by a process
such as pheme.anonymize
"""
from datetime import datetime
import os
import re
import unittest
from sqlalchemy import select, and_
from pheme.util.config import Config
from pheme.util.pg_access import db_connection
from pheme.warehouse.tables import *
from pheme.warehouse.tests.process_testfiles import MirthInteraction
def setup_module():
"""Populate database with test data for module tests"""
c = Config()
if c.get('general', 'in_production'): # pragma: no cover
raise RuntimeError("DO NOT run destructive test on production system")
"Pull in the filesystem dump from a previous mirth run"
mi = MirthInteraction()
mi.restore_database()
"Run a quick sanity check, whole module requires a populated db"
connection = db_connection('warehouse')
count = connection.session.query(HL7_Msh).count()
connection.disconnect()
if count < 4000:
err = "Minimal expected count of records not present. "\
"Be sure to run 'process_testfiles_via_mirth' as a prerequisite"
raise RuntimeError(err)
class SQATest(unittest.TestCase):
"""Common SQLAlchemy Database connection management"""
def setUp(self):
super(SQATest, self).setUp()
self.connection = db_connection('warehouse')
self.session = self.connection.session
def tearDown(self):
super(SQATest, self).tearDown()
self.connection.disconnect()
class TestHl7Msh(SQATest):
def testMessageControlId(self):
"Confirm a few known ids made it"
knownIds = ['04296.6762.70.327884.81.7.6110',
'042.974.4.243009.640.167946.62',
'043.032.77067.967.56.4636.7304',
'0.431.202.76464.03.5.50.91.708',
'043.193.87303.07197.642.336.7.',
'043.199.72.771707.5.3.702859.7',
'043.20145.3.1453.698816.3711.9',
'04.32.210.959.093.4864.41.2045', ]
query = self.session.query(HL7_Msh).filter(
HL7_Msh.message_control_id.in_(knownIds))
self.assertEquals(query.count(), len(knownIds))
def testDuplicateMessageControlIds(self):
"duplicate message control ids should only be imported once"
# test set has these duplicates
dup_ids = ('882379.1.564582.7826.8.77.626.',
'4.67200.60.28351.67009.683.7.4',
'794.8678.94.3702.1785.63.6925.')
for id in dup_ids:
query = self.session.query(HL7_Msh).filter(
HL7_Msh.message_control_id==id)
self.assertEquals(1, query.count())
def testMessageType(self):
"All message types should be saved in the database"
types = ["ADT^A03^ADT_A03",
"ADT^A04^ADT_A01",
"ADT^A08^ADT_A01", ]
for type in types:
query = self.session.query(HL7_Msh)
query.filter(HL7_Msh.message_type==type)
self.assert_(query.count() > 0,
"HL7_MSH.message_type missing %s" % type)
def testFacility(self):
"Every row should have a facility, with a facility_lookup value"
        query = self.session.query(HL7_Msh).filter(HL7_Msh.facility == None)
self.assert_(query.count() == 0,
'HL7_MSH.facility was blank for at least id %s'
% getattr(query.first(), 'hl7_msh_id', False))
def testMessageDatetimeNotNull(self):
"All rows expected to have date time"
query = self.session.query(HL7_Msh).\
            filter(HL7_Msh.message_datetime == None)
self.assert_(query.count() == 0, 'HL7_MSH.message_datetime '
'was blank for id %s' % getattr(query.first(),
'hl7_msh_id', ''))
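# A minimal sketch of the CX-style id format asserted in the visit tests below
# (synthetic value): the field reads "<id>^^^<assigning authority>" and is
# split with the same regex those tests use.
#
# >>> import re
# >>> m = re.search(r"(\d+)\^\^\^(.*)", "761339^^^&3768573961&NPI")
# >>> (m.group(1), m.group(2))
# ('761339', '&3768573961&NPI')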
class TestMU(SQATest):
def test_mu_data(self):
# Test a number of values from a MU test file 'Bfbjpo'
id = '2.6.21919.99289.858698.379.23.'
query = self.session.query(HL7_Msh).\
filter(HL7_Msh.message_control_id==id)
msh_row = query.one()
self.assertEquals('3768573961', msh_row.facility)
# Confirm all visit row data matchs the source
query = self.session.query(HL7_Visit).\
filter(HL7_Visit.hl7_msh_id==msh_row.hl7_msh_id)
visit_row = query.one()
self.assertEquals('761339^^^&3768573961&NPI',
visit_row.patient_id)
self.assertEquals('358798^^^&3768573961&NPI',
visit_row.visit_id)
self.assertEquals('99304', visit_row.zip)
self.assertEquals(datetime.strptime('32460528115833',
'%Y%m%d%H%M%S'),
visit_row.admit_datetime)
self.assertEquals('M', visit_row.gender)
self.assertEquals('Seizure', visit_row.chief_complaint)
self.assertEquals('I', visit_row.patient_class)
self.assertEquals('06', visit_row.disposition)
self.assertEquals('White', visit_row.race)
self.assertEquals('071', visit_row.county)
self.assertEquals('9', visit_row.admission_source)
self.assertEquals(datetime.strptime('32460530120533',
'%Y%m%d%H%M%S'),
visit_row.discharge_datetime)
def testMessageDatetime(self):
"Confirm a known date from test file 'Rrliqv' is present"
# 32421213093537 -> 3242/12/13 09:35:37
lookfor = datetime(3242,12,13,9,35,37)
query = self.session.query(HL7_Msh).filter(
HL7_Msh.message_datetime == lookfor)
self.assert_(query.count() > 0, 'HL7_MSH.message_datetime '
'%s was missing' % lookfor)
def testBatchFilename(self):
"Should find at least one entry for every file in the set."
mi = MirthInteraction()
for f in mi.filenames:
file = os.path.basename(f)
query = self.session.query(HL7_Msh).filter(
HL7_Msh.batch_filename == file)
self.assert_(query.count() > 0, 'HL7_MSH.batch_filename '
'%s was missing' % file)
class TestHl7Visit(SQATest):
def testMshId(self):
"The hl7_msh_id foreign key should be present"
query = self.session.query(HL7_Visit)
for visit in query.all():
self.assertTrue(visit.hl7_msh_id > 0,
"hl7_msh_id foreign key not set in HL7_VISIT")
def testVisitId(self):
"Format should include a visit_id and the assigning authority"
query = self.session.query(HL7_Visit)
# Format should include a visit id and the assigning authority
known_authorities = ['&3768573961&NPI',
'&650903.98473.0179.6039.1.333.1&ISO',
'&7281.82.5411.3.2.32886.2.7795.&ISO']
pat = re.compile("(\d+)\^\^\^(.*)")
self.assert_(query.count() > 0, "HL7_VISIT has no data")
for visit in query.all():
match = re.search(pat, visit.visit_id)
            self.assert_(int(match.group(1)) > 0,
'visit_id contains bogus visit id: %s' %
visit.visit_id )
self.assert_(match.group(2) in known_authorities,
'visit_id missing expected authority: %s' %
visit.visit_id )
def testPatientId(self):
"Format should include a patient_id and the assigning authority"
query = self.session.query(HL7_Visit)
        # Format should include a patient id and the assigning authority
known_authorities = ['&3768573961&NPI',
'&650903.98473.0179.6039.1.333.1&ISO',
'&7281.82.5411.3.2.32886.2.7795.&ISO']
pat = re.compile("(\d+)\^\^\^(.*)")
self.assert_(query.count() > 0, "HL7_VISIT has no data")
for visit in query.all():
match = re.search(pat, visit.patient_id)
            self.assert_(int(match.group(1)) > 0,
                         'patient_id contains bogus patient id: %s' %
visit.patient_id )
self.assert_(match.group(2) in known_authorities,
'patient_id missing expected authority: %s' %
visit.patient_id )
def testZip(self):
"Zips should be 3 or 5 digits"
query = self.session.query(HL7_Visit).filter(HL7_Visit.zip <> None)
# Expect every zip to be 3 OR 5 digits for US zipcodes.
pat = re.compile("^\d*$")
for visit in query.all():
if visit.country == 'CAN':
continue # i.e. T2T1E41 ...
self.assert_(len(visit.zip) == 3 or \
len(visit.zip) == 5,
'zip is the wrong length, '
'should be 3 or 5 digits: %s %s' %
(visit.zip,visit.country))
self.assert_(re.search(pat, visit.zip),
'zip %s contains non integers' % visit.zip)
def testAdmitDatetime(self):
"Confirm a few random known dates from test file 'Rrliqv' are present"
dates = [datetime(3242,12,03,01,33,42),
datetime(3242,12,13,9,33,36),
datetime(3242,8,23,5,34,42)]
for d in dates:
query = self.session.query(HL7_Visit).filter(HL7_Visit.admit_datetime == d)
self.assert_(query.count() >= 1,
"admit_datetime missing '%s'" % d)
def testDischargeDatetime(self):
"Confirm a few random known dates from test file 'Rrliqv' are present"
        dates = (datetime(3242, 12, 1, 7, 33, 35),
                 datetime(3242, 12, 6, 6, 30, 42),
                 datetime(3242, 7, 15, 12, 31, 33),)
for d in dates:
query = self.session.query(HL7_Visit).filter(
HL7_Visit.discharge_datetime == d)
self.assert_(query.count() >= 1,
"discharge_datetime missing '%s'" % d)
def testGender(self):
"Valid genders are M, F, U, and O"
        query = self.session.query(HL7_Visit).filter(HL7_Visit.gender != None)
valid = ['M','F','U','O']
self.assert_(query.count() > 0, "HL7_VISIT.gender in error")
for visit in query.all():
self.assert_(visit.gender in valid,
'gender %s not valid' % visit.gender)
def testDOB(self):
"Dates of birth should be in the YYYYMM format"
        query = self.session.query(HL7_Visit).filter(HL7_Visit.dob != None)
        self.assert_(query.count() > 0, "HL7_VISIT.dob in error")
        pat = re.compile(r"^\d{6}$")
for visit in query.all():
match = re.search(pat, visit.dob)
self.assert_(match,
"dob %s doesn't match YYYYMM format" % visit.dob)
def testChiefComplaint(self):
"A few random expected values from test file 'Rrliqv'"
complaints = ["WEAKNESS,PAIN",
"PYLORIC STENOSIS",
"DIFFICULTY BREATHING",
"LIP LACERATION",
"COLONOSCOPY DIAGNOSTIC",
"RECTAL BLEED WITH OVER COAGULATION",]
for c in complaints:
query = self.session.query(HL7_Visit).filter(
HL7_Visit.chief_complaint == c)
self.assert_(query.count() > 0,
"chief_complaint missing %s" % c)
def testPatientClass(self):
"Valid patient classes are E, I, O and U"
valid = ['E','I','O','U']
query = self.session.query(HL7_Visit).filter(
            HL7_Visit.patient_class != None)
self.assert_(query.count() > 0, "HL7_VISIT.patient_class in error")
for visit in query:
self.assert_(visit.patient_class in valid, "patient_class "
"'%s' not valid" % visit.patient_class)
def testDisposition(self):
"""Check for "standardized disposition code" - a 2 digit number"""
sel = select((HL7_Visit.disposition,)).distinct()
query = self.session.execute(sel)
        pat = re.compile(r"^\d\d$")
for disposition in query.fetchall():
if not disposition[0]: continue
self.assert_(re.match(pat, disposition[0]),
"disposition '%s'doesn't look valid" % disposition[0])
def testChiefComplaintWithQuotes(self):
"Bug fix from a cheif complaint with multiple quotes"
complaint = "COUGH,'VERY SICK'"
query = self.session.query(HL7_Visit).filter(HL7_Visit.chief_complaint == complaint)
self.assert_(query.count() > 0,
"chief_complaint missing %s" % complaint)
def testCounty(self):
"County field should be persisted"
counties_in_test_set = ('ADA-WA','ASO-WA','BEN-ID','BEN-WA',
'BONN-ID','COW-WA','FER-WA','FRA-WA',
'GRA-WA','KIN-WA','KOO-ID','LAT-ID',
'LIN-MT','LIN-WA','MIS-MT','NEZPE-ID',
'OKA-WA','PENOR-WA','SHO-ID','SPO-WA',
'STE-WA','UMA-OR','WALWA-WA','WHI-WA',
'YAK-WA',)
query = self.session.query(HL7_Visit.county).distinct()
counties_in_results = [row[0] for row in query]
self.assertTrue(len(counties_in_results) >=
len(counties_in_test_set))
for c in counties_in_test_set:
if c not in counties_in_results:
self.fail('%s not found in db' %c)
def testRaceEthnicity(self):
"Race field should be persisted"
race_eth_in_test_set = ('American Indian or Alaska Native',
'Asian',
'Black or African American',
'Native Hawaiian or Other Pacific Islander',
'White',
'Other Race',
'Hispanic or Latino',)
query = self.session.query(HL7_Visit.race).distinct()
race_in_results = [row[0] for row in query]
self.assertTrue(len(race_in_results) >=
len(race_eth_in_test_set))
for r in race_eth_in_test_set:
if r not in race_in_results:
self.fail('%s not found in db' %r)
def testServiceCode(self):
"Service codes"
codes_in_test_set = ('CAN', 'CAR', 'INT', 'NBI', 'OBG', 'OBS',
'OTH', 'PED', 'PHY', 'PIN', 'SUR',)
query = self.session.query(HL7_Visit.service_code).distinct()
results = [row[0] for row in query]
self.assertTrue(len(results) >= len(codes_in_test_set))
for s in codes_in_test_set:
if s not in results:
self.fail('%s not found in db' %s)
def testServiceAltId(self):
"Service Alternate Identifier"
codes_in_test_set = ('CCU', 'GYNE', 'ICU', 'MEDI', 'MEDN',
'MEDO', 'MEDR', 'NBIC', 'NBTP', 'NES',
'NEU', 'NURS', 'OBS', 'OBV', 'OHS',
'OPRA', 'OPRC', 'ORTH', 'OTRT', 'PEDI',
'PEDO', 'PIC', 'PLSU', 'SURG', 'UROG',)
query = self.session.query(HL7_Visit.service_alt_id).distinct()
results = [row[0] for row in query]
self.assertTrue(len(results) >= len(codes_in_test_set))
for s in codes_in_test_set:
if s not in results:
self.fail('%s not found in db' %s)
def testAdmissionSource(self):
"Admission Source"
codes_in_test_set = ('1','2','4','5','6','7','8','9','D')
query = self.session.query(HL7_Visit.admission_source).distinct()
results = [row[0] for row in query]
self.assertTrue(len(results) >= len(codes_in_test_set))
for s in codes_in_test_set:
if s not in results:
self.fail('%s not found in db' %s)
def testAssignedLocation(self):
"Assigned Patient Location"
locations_in_test_set = (
'2N', '2S', '4N', '4S', '5N', '5S', '6N', '6S', '7N',
'7S', '8N', '8NURS', '8S', 'ACCS', 'ACUI', 'AN', 'BLU', 'BP',
'BPNB', 'CARA', 'CARO', 'CON', 'D10TW', 'D11TW', 'DACU7',
'DBS', 'DCCU', 'DCSSU', 'DCT', 'DDSC', 'DEHC', 'DER', 'DEX',
'DHMRI', 'DMAM', 'DMLHEC', 'DMLL', 'DMLMB', 'DMLS', 'DOBD',
'DOBM', 'DPUSD', 'DRWCLAB', 'DSSU', 'DSURG', 'DULT', 'EC',
'EDX', 'EEG', 'EMR', 'EN', 'END', 'ENDO', 'ER', 'ER2', 'ERM',
'ES', 'FM', 'FMCI', 'FST', 'FST2', 'GRN', 'HLBW', 'HRAG',
'ICU', 'KPTV', 'KTP', 'LAB', 'LABSJH', 'LBW', 'LD', 'LSDO',
'MAM', 'MEDIW', 'MRI', 'NBN', 'NICU', 'NSYI', 'NUR', 'OB',
'OBG', 'ONM', 'OPP', 'OPRA', 'OPSD', 'P9N', 'PDIM', 'PDIS',
'PDON', 'PEDO', 'PEDS', 'PFC', 'PICU', 'PM', 'PMAC', 'PMC',
'PMCLAB', 'PMCURC', 'POLAB', 'PRP', 'RAC', 'RAD', 'RAG',
'RAM', 'RAS', 'RAU', 'RED', 'RHC', 'ROOV', 'RSP', 'SCSG',
'SDC', 'SDS', 'SERI', 'SHL', 'SMAU', 'SPC', 'SSS', 'SSU',
'SURI', 'TED', 'TED-MC', 'TEMP', 'TXR', 'US', 'VER',
'VMEDSURG', 'VSO', 'WER', 'XRA', 'YELO')
query = self.session.query(HL7_Visit.\
assigned_patient_location).distinct()
results = [row[0] for row in query]
self.assertTrue(len(results) >= len(locations_in_test_set))
for s in locations_in_test_set:
if s not in results:
self.fail('%s not found in db' %s)
def testState(self):
"State"
states_in_test_set = ('AZ','CN','ID','LA','MT','OR','TX',
'WA',)
query = self.session.query(HL7_Visit.state).distinct()
results = [row[0] for row in query]
self.assertTrue(len(results) >= len(states_in_test_set))
for s in states_in_test_set:
if s not in results:
self.fail('%s not found in db' %s)
class TestHl7Dx(SQATest):
def testDxRank(self):
"Confirm rank is sticking on all dx messages"
query = self.session.query(HL7_Dx)
for dx in query:
self.assert_(dx.rank > 0) # the default
def testDxCode(self):
"Look up a few known dx_codes from test set"
codes = ["787.01","847.0","850.9","920","922.31","923.00","996.73",
"E879.1","E885.9","E928.9","E947.8","E968.8","V09.0","V12.04",
"V22.1","V27.0",]
for c in codes:
query = self.session.query(HL7_Dx).filter(HL7_Dx.dx_code == c)
self.assert_(query.count() >= 1, "dx_code missing value '%s'" % c)
def testDxDescription(self):
"Look up a few known dx_descriptions from test set"
descripts = ["OTH LYMPHOMAS EXTRANODAL SOLID ORGAN UNSPECIFIED",
"DIAB MELL WO COMPL, TYPE II OR UNSPEC TYPE, NOT UN",
"DIAB W RENAL MANIFEST, TYPE II OR UNSPEC TYPE, NOT",
"HYPERLIPIDEMIA NEC/NOS",
"OBESITY, NOS",
"PANCYTOPENIA",
"ANEMIA NEOPLASTIC DIS",
"THROMBOCYTOPENIA NOS",
"SCHIZOPHRENIA NOS-UNSPEC",
"HYPTNSV CHR KID DIS, UNSPEC, W CHR KD STAGE V OR E",
"BRONCHITIS NOS",
"ASTHMA, UNSPECIFIED",
"UTERINE TUMOR-DELIVERED",
"PREV CESAREAN DELIVRY W/ OR W/O MENT ANTEPART COND",
"PREV CESAREAN DELIVERY, ANTEPARTUM COND OR COMPLIC",
"JOINT PAIN-SHLDER",
"CLEFT PALATE LIP NOS",
"OTHER PRETERM INFANTS, 2000-2500 GRAMS",
"FETAL/NEONATAL JAUND NOS",
"DIZZINESS AND GIDDINESS",
"CONCUSSION NOS",
"CONTUSION FACE/SCALP/NCK",]
for d in descripts:
query = self.session.query(HL7_Dx).filter(HL7_Dx.dx_description == d)
self.assert_(query.count() >= 1, "dx_code missing value '%s'" % d)
def testDxType(self):
"""Only three expected types:
A^Admitting^HL70052^A^^L
F^Final^HL70052^F^^L
W^Working^HL70052^W^^L"""
expected = ['A','F','W']
        query = self.session.query(HL7_Dx).filter(HL7_Dx.dx_type != None)
self.assert_(query.count() > 0, "dx_type lacks data")
for dx in query.all():
self.assert_(dx.dx_type in expected,
'dx_type %s is not valid' % dx.dx_type)
    def testBackslashInDescription(self):
"Handle backslashes found in the diagnosis description"
# This visit should have 4 diagnoses including one with
# a couple backslashes in the description
query = self.session.query(HL7_Dx).join(
(HL7_Visit, HL7_Dx.hl7_msh_id==HL7_Visit.hl7_msh_id)).\
filter(HL7_Visit.visit_id==\
'093601^^^&650903.98473.0179.6039.1.333.1&ISO')
self.assertEquals(query.count(), 4)
found = False
for dx in query:
if dx.dx_code == '793.99':
self.assertEquals(
dx.dx_description,
"OTH NOSP (ABN) FINDINGS RADIOLOGICAL \T\\ ")
found = True
self.assert_(found, "Missing dx with backslash")
class TestHl7Obr(SQATest):
def testLoincCode(self):
"Look up a few known LOINC codes from test set"
codes = ("600-7", "45187-2")
for c in codes:
query = self.session.query(HL7_Obr).filter(
HL7_Obr.loinc_code == c)
            self.assert_(query.count() >= 1, "loinc_code '%s' missing" % c)
def testLoincText(self):
"Look up a few known loinc text fields from test set"
expected = ("Bacteria identified:Prid:Pt:Bld:Nom:Culture",
"Antibiotic XXX:Susc:Pt:Isolate:OrdQn:Agar diffusion")
for e in expected:
query = self.session.query(HL7_Obr).filter(
HL7_Obr.loinc_text == e)
self.assert_(query.count() >= 1, "loinc_text '%s' missing" % e)
def testAltText(self):
"Check for expected alt_text"
expected = ("Culture Blood","KB Susceptibility")
for e in expected:
query = self.session.query(HL7_Obr).filter(
HL7_Obr.alt_text == e)
self.assert_(query.count() >= 1, "alt_text '%s' missing"
% e)
def testStatus(self):
stati = ('A','F','P')
query = self.session.query(HL7_Obr.status).distinct()
results = [row[0] for row in query]
self.assertTrue(len(results) >= len(stati))
for s in stati:
if s not in results:
self.fail('%s not found in db' %s)
def testReportDatetime(self):
"Report datetime"
a_few_from_test_set = (
datetime(3243, 4,13, 3,30,33),
datetime(3243, 4,13,15,34,38),
datetime(3243, 4,18, 9,30,33),
datetime(3244, 7, 4,22,33,33),
datetime(3244, 7, 5, 2,34,40),
datetime(3244, 7, 5, 6,30,33),
datetime(3244, 7, 5, 6,34,39),
datetime(3244, 7, 5, 6,34,40),
datetime(3244, 7, 5, 6,34,41),
datetime(3346,12, 1,22,30,36),
)
query = self.session.query(HL7_Obr.report_datetime).distinct()
results = [row[0] for row in query]
self.assertTrue(len(results) >= len(a_few_from_test_set))
for s in a_few_from_test_set:
if s not in results:
self.fail('%s not found in db' %s)
def testSpecimenSource(self):
"Specimen Source"
a_few_from_test_set = (
'ABD', 'ANTRUM', 'BAL', 'BLUD', 'BRUESO', 'CERVIX',
'CSF', 'DWA', 'DWB', 'FOOT', 'LEG', 'LYM', 'NASAL', 'NP',
'PERITO', 'PF', 'STOOL', 'SWO', 'TF', 'THOFLD', 'THROAT',
'THT', 'UCV', 'UFC', 'UMC', 'UO', 'URICC', 'URINE', 'URISCA',
'VAG', 'VAGINA', 'VAGREC', 'WASHBR',
)
query = self.session.query(HL7_Obr.specimen_source).distinct()
results = [row[0] for row in query]
self.assertTrue(len(results) >= len(a_few_from_test_set))
for s in a_few_from_test_set:
if s not in results:
self.fail('%s not found in db' %s)
def testFillerOrder(self):
"Filler Order Number"
a_few_from_test_set = (
'4.8294.11.7.2601.3.1.58.2.1272',
'7099007569',
'454368.9518.831021.0.522.06960',
'6375556977',
'9937702778',
'5548806817',
'4.41.7.0.922032.304.52.018768.',
)
query = self.session.query(HL7_Obr.filler_order_no).distinct()
results = [row[0] for row in query]
self.assertTrue(len(results) >= len(a_few_from_test_set))
for s in a_few_from_test_set:
if s not in results:
self.fail('%s not found in db' %s)
def testCoding(self):
"Coding"
query = self.session.query(HL7_Obr.coding).filter(
HL7_Obr.coding=='LN')
self.assertTrue(query.count() >= 1300)
def testAltCode(self):
"alt code"
a_few_from_test_set = (
'12HIVR',
'AASGS',
'ABO',
'BC',
'BS',
'HCVRNAQ',
'HEPA',
'HEP-C',
'HIV1RUQ',
'HPS',
'IC',
)
query = self.session.query(HL7_Obr.alt_code).distinct()
results = [row[0] for row in query]
self.assertTrue(len(results) >= len(a_few_from_test_set))
for s in a_few_from_test_set:
if s not in results:
self.fail('%s not found in db' %s)
def testAltCoding(self):
"Alt Coding"
query = self.session.query(HL7_Obr.alt_coding).filter(
HL7_Obr.alt_coding=='L')
self.assertTrue(query.count() >= 158)
class TestHl7Obx(SQATest):
def testValue(self):
"Check for expected value_types"
expected = ("CE", "TX")
for e in expected:
query = self.session.query(HL7_Obx).filter(
HL7_Obx.value_type == e)
self.assert_(query.count() >= 1, "value_type '%s' missing"
% e)
def testObservationValue(self):
"Check for expected observation values"
observations = [
'Microorganism or agent identified:Prid:Pt:XXX:Nom:',
'Amoxicillin+Clavulanate:Susc:Pt:Isolate:OrdQn:Agar diffusion',
'Cefazolin:Susc:Pt:Isolate:OrdQn:Agar diffusion',
'Ceftazidime:Susc:Pt:Isolate:OrdQn:Agar diffusion']
for ob in observations:
query = self.session.query(HL7_Obx).filter(
HL7_Obx.observation_text == ob)
self.assert_(query.count() > 0,
'observation_text was missing: %s' % ob)
def testObxWithoutObr(self):
"Confirm we're saving obx rows w/o a preceeding obr"
# a couple known values from test set that don't
# show up when only linking w/ hl7_obr
observation_ids = ['43137-9','29553-5']
for id in observation_ids:
query = self.session.query(HL7_Obx).filter(
HL7_Obx.observation_id == id)
self.assert_(query.count() > 0,
'observation_id (one w/o an obr link) '
'was missing: %s' % id)
def testObservationDateTime(self):
"Confirm we're saving the datetime of the observation"
# To date, the only OBX rows w/ date time values are from
# facility info. OBR however frequently shows dates. Make
# sure we are correctly parsing those (using hand scraped
# values from test file Oshroj
"""select observation_datetime from hl7_obr join hl7_msh on
hl7_msh.hl7_msh_id = hl7_obr.hl7_msh_id where
hl7_msh.batch_filename = 'Oshroj'"""
expected_datetimes = (
'3243-04-18 09:34:38',
'3243-04-18 09:34:38',
'3243-04-17 18:30:34',
'3243-04-18 09:30:33',
'3243-04-17 18:30:34',
'3243-04-18 08:30:33',
'3243-04-18 08:30:33',
'3243-04-18 08:30:33',
'3243-04-18 08:30:33',
'3243-04-18 08:30:33',
'3243-04-18 08:30:33',
'3243-04-18 09:35:38',
'3243-04-18 10:31:38',
'3243-04-18 09:35:39',
)
        for dt in expected_datetimes:
            query = self.session.query(HL7_Obr).filter(
                HL7_Obr.observation_datetime == dt)
            self.assert_(query.count() > 0,
                         'observation_datetime was missing: %s' % dt)
def testGeneralOrder(self):
"Look for OBR data from ORM^O01 (general order messages)"
expected_loinc_codes = ('630-4', '664-3', '664-3', '664-3',
'580-1', '543-9', '593-4', '588-4',
'700-5', '600-7', '600-7', '34468-9')
for loinc_code in expected_loinc_codes:
query = self.session.query(HL7_Obr).filter(
HL7_Obr.loinc_code == loinc_code)
self.assert_(query.count() > 0,
'loinc_code (from ORM^O01) was missing: %s' %
loinc_code)
def testPerformingLab(self):
"performing_lab"
for lab in ('Ttvzevrbky', 'Majpsarsbg'):
query = self.session.query(HL7_Obx).\
filter(HL7_Obx.performing_lab_code == lab)
self.assertTrue(query.count() > 0)
def testSequence(self):
"obx.sequence (sub-id)"
# Currently one test matches this, returning 26 related
# hl7_obx statements - good one for sequence check:
dt = datetime.strptime('3244-07-05 06:33:42',
"%Y-%m-%d %H:%M:%S")
query = self.session.query(HL7_Obx).\
join((HL7_Obr,
HL7_Obr.hl7_obr_id==HL7_Obx.hl7_obr_id)).\
filter(and_(HL7_Obr.loinc_code==\
'21020-3', HL7_Obr.report_datetime==dt)).\
order_by(HL7_Obx.hl7_obx_id)
self.assertEquals(query.count(), 19)
last_sd, last_pt = 0,0
for e in query:
sd, pt = e.sequence.split('.')
if int(pt) < last_pt:
self.assertTrue(int(sd) > last_sd)
last_sd = int(sd)
last_pt = int(pt)
def testUnits(self):
"obx.units"
# All these unique unit values were found by hand parsing
# testfile Wpqvqn - confirm their presence
expected = ('Years', 'fl', 'g/dl', 'IU/mL', 'K/CMM', '%',
'M/CMM', 'pg')
query = self.session.query(HL7_Obx.units).distinct()
found = [e[0] for e in query]
for unit in expected:
self.assertTrue(unit in found)
def test_coding(self):
"OBX coding"
expected = ("LN", "NULLFL")
for e in expected:
query = self.session.query(HL7_Obx).filter(HL7_Obx.coding == e)
self.assert_(query.count() >= 1, "coding '%s' missing"
% e)
def test_alt_id(self):
"OBX alt_id"
expected = ('TZP', 'URI*', 'VAN', 'VZS*', 'WBC', 'WBC*', 'WC*',)
for e in expected:
query = self.session.query(HL7_Obx).filter(HL7_Obx.alt_id == e)
self.assert_(query.count() >= 1, "alt_id '%s' missing"
% e)
def test_alt_text(self):
"OBX alt_text"
expected = (
"RBC DISTRIBUTION WIDTH-SD",
"REPORT STATUS",
"RESPIRATORY VIRAL SCREEN",
"Result",
"RH(D)",
"RPR",
"RUBELLA AB, IGG",
"SODIUM",
"SPECIMEN SOURCE",
"Tetracycline",
"TETRACYCLINE",
)
for e in expected:
query = self.session.query(HL7_Obx).filter(HL7_Obx.alt_text == e)
self.assert_(query.count() >= 1, "alt_text '%s' missing"
% e)
def test_alt_coding(self):
"OBX alt_coding"
expected = ("L",)
for e in expected:
query = self.session.query(HL7_Obx).filter(
HL7_Obx.alt_coding == e)
self.assert_(query.count() >= 1, "alt_coding '%s' missing"
% e)
def test_reference_range(self):
"OBX reference_range"
expected = (
"0.6-1.4",
"0.9-2.8",
"10-20",
"1.0-4.8",
"11.0-14.5",
"12.0-16.0",
"12-45",
"<129",
"8-35",
"8.4-10.2",
">9",
"9.4-12.5",
"98-108",
"NEGATIVE",
"NR",
)
for e in expected:
query = self.session.query(HL7_Obx).filter(
HL7_Obx.reference_range == e)
self.assert_(query.count() >= 1, "reference_range '%s' missing"
% e)
def test_abnorm_id(self):
"OBX abnorm_id"
expected = ('H', 'L', 'R', 'S')
for e in expected:
query = self.session.query(HL7_Obx).filter(HL7_Obx.abnorm_id == e)
self.assert_(query.count() >= 1, "abnorm_id '%s' missing"
% e)
def test_abnorm_text(self):
"OBX abnorm_text"
expected = (
"Above high normal",
"Below low normal",
"Resistant. Indicates for microbiology susceptibilities only.",
"Susceptible. Indicates for microbiology susceptibilities only.",
)
for e in expected:
query = self.session.query(HL7_Obx).filter(
HL7_Obx.abnorm_text == e)
self.assert_(query.count() >= 1, "abnorm_text '%s' missing"
% e)
def test_abnorm_coding(self):
"OBX abnorm_coding"
expected = ("HL70078",)
for e in expected:
query = self.session.query(HL7_Obx).filter(
HL7_Obx.abnorm_coding == e)
self.assert_(query.count() >= 1, "abnorm_coding '%s' missing"
% e)
def test_alt_abnorm_id(self):
"OBX alt_abnorm_id"
expected = ('H', 'L', 'R', 'R*', 'S')
for e in expected:
query = self.session.query(HL7_Obx).filter(
HL7_Obx.alt_abnorm_id == e)
self.assert_(query.count() >= 1, "alt_abnorm_id '%s' missing"
% e)
def test_alt_abnorm_text(self):
"OBX alt_abnorm_text"
expected = ('Susceptible. Indicates for microbiology '
'susceptibilities only.',)
for e in expected:
query = self.session.query(HL7_Obx).filter(
HL7_Obx.alt_abnorm_text == e)
self.assert_(query.count() >= 1, "alt_abnorm_text '%s' missing"
% e)
def test_alt_abnorm_coding(self):
"OBX alt_abnorm_coding"
expected = ("L", "HL70078")
for e in expected:
query = self.session.query(HL7_Obx).filter(
HL7_Obx.alt_abnorm_coding == e)
self.assert_(query.count() >= 1, "alt_abnorm_coding '%s' missing"
% e)
class TestHl7Spm(SQATest):
def testSpm(self):
"Check all values from a few known spm rows came through"
sample = '122561005^Blood specimen from patient ' +\
'(specimen)^SN^BLUD^Blood^L'
fields = sample.split('^')
id, description, code = fields[0], fields[1], fields[3]
query = self.session.query(HL7_Spm).\
filter(HL7_Spm.id == id)
self.assertTrue(query.count() >= 1)
descriptions, codes = [], []
for q in query:
descriptions.append(q.description)
codes.append(q.code)
self.assertTrue(description in descriptions)
self.assertTrue(code in codes)
def testSpmCodes(self):
"Confirm all codes came through"
codes = (
'ABD', 'BAL', 'BLUD', 'CERVIX', 'CSF', 'DWA', 'DWB',
'FOOT', 'LEG', 'LYM', 'NASAL', 'NP', 'PERITO', 'PF',
'STOOL', 'SWO', 'TF', 'THROAT', 'THT', 'UCV', 'UFC',
'URICC', 'URINE', 'URISCA', 'VAGINA', 'VAGREC', 'WASHBR',
)
query = self.session.query(HL7_Spm.code).distinct()
results = [row[0] for row in query]
self.assertTrue(len(results) >= len(codes))
for s in codes:
if s not in results:
self.fail('%s not found in db' %s)
class TestObservationData(SQATest):
def testAssociations(self):
"Confirm OBX and SPM data is available"
lc = '610-6'
v_id = '667570^^^&650903.98473.0179.6039.1.333.1&ISO'
query = self.session.query(ObservationData).\
join((HL7_Visit,
HL7_Visit.hl7_msh_id==HL7_Obr.hl7_msh_id)).\
filter(and_(HL7_Visit.visit_id==v_id,
HL7_Obr.loinc_code==lc))
lab_msg = query.one()
self.assertEquals(len(lab_msg.spms), 1)
self.assertEquals(len(lab_msg.obxes), 2)
def testVisitLookup(self):
"Confirm we can obtain all for a single visit in one query"
v_id = '950039^^^&650903.98473.0179.6039.1.333.1&ISO'
query = self.session.query(ObservationData).\
join((HL7_Visit,
HL7_Visit.hl7_msh_id==HL7_Obr.hl7_msh_id)).\
filter(HL7_Visit.visit_id == v_id)
# Should see two ObservationData, both with obxes
found = 0
for r in query:
self.assertTrue(len(r.obxes) > 0)
found += 1
self.assertEquals(found, 2)
def dontestMegaJoin(self):
v_id = '950039^^^&650903.98473.0179.6039.1.333.1&ISO'
query = self.session.query(ObservationData,HL7_Visit).\
join((HL7_Visit,
HL7_Visit.hl7_msh_id==HL7_Obr.hl7_msh_id),).\
filter(HL7_Visit.visit_id == v_id)
for r,j in query:
print r
print j
def testFullMessageByVisit(self):
"8 MSH for one visit - check a variety of info"
v_id = '405774^^^&650903.98473.0179.6039.1.333.1&ISO'
query = self.session.query(FullMessage).\
join((HL7_Visit,
HL7_Visit.hl7_msh_id==FullMessage.hl7_msh_id),).\
filter(HL7_Visit.visit_id == v_id).\
order_by(FullMessage.message_datetime)
self.assertEquals(query.count(), 8)
def testMessageW9(self):
nine = '467984^^^&650903.98473.0179.6039.1.333.1&ISO'
query = self.session.query(FullMessage).\
join((HL7_Visit,
HL7_Visit.hl7_msh_id==FullMessage.hl7_msh_id),).\
filter(HL7_Visit.visit_id == nine)
self.assertEquals(query.count(), 1)
msg = query.one()
self.assertEquals(len(msg.dxes), 9)
def testIPlabStatus(self):
"OBR 25.1 should be single char, map 'IP' to 'I'"
visit = '472781^^^&650903.98473.0179.6039.1.333.1&ISO'
query = self.session.query(HL7_Obr).\
join((HL7_Visit,
HL7_Visit.hl7_msh_id==HL7_Obr.hl7_msh_id),).\
filter(HL7_Visit.visit_id == visit)
self.assertEquals(query.count(), 1)
msg = query.one()
self.assertEquals(msg.status, 'I')
class TestHl7Nte(SQATest):
def testNte(self):
"Check for existence of expected notes from test set"
notes_in_test_set = (
'Rcmamkdjer',
'Oehgwlpwby',
'Qfvahxhaiy',
'Sotfpqpkyf',
'Bmalwxszmf',
'Melpbusdee',
'Nybhpeyfgz',
'Agrqjnpzlj',
'Jxlnfftcvx',
'Wjkgypfgju',
'Sozcdaxjan',
'Enemiynoyz',
'Pnvzkuuwpt',
'Nhxidnrfrg',
'Tfolwwtfcl',
'Meybschdzi',
'Sixasldidd',
)
query = self.session.query(HL7_Nte.note).distinct()
results = [row[0] for row in query]
self.assertTrue(len(results) >= len(notes_in_test_set))
for s in notes_in_test_set:
if s not in results:
self.fail('%s not found in db' %s)
|
pbugni/pheme.warehouse
|
pheme/warehouse/tests/test_mirth_processing.py
|
Python
|
bsd-3-clause
| 41,610
|
[
"VisIt"
] |
eae9513556ed369317b94443ba92e44b6c8778d6f1edd57e3472afca87fa69fa
|
#!/usr/bin/env python
# Copyright (C) 2006 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a full list
# of copyright holders.
#
# Prof. C Pain
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# amcgsoftware@imperial.ac.uk
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import mayavi
import getopt, sys, os, time
import mayavi.Common, mayavi.Base.Objects, mayavi.Base.ModuleManager, mayavi.Base.DataVizManager
import mayavi.Sources.VtkDataReader, mayavi.Sources.PLOT3DReader, mayavi.Sources.VRMLImporter
import mayavi.Sources.mv3DSImporter, mayavi.Sources.VtkData
def usage ():
msg="""Usage:\n\nfl_mayavi_animate -f <fl-file> [-z <mv-file>] [options] <start> <finish>
    Where <start> and <finish> are the first and last dump ids of the
files to be rendered and <mv-file> is the MayaVi visualization
file to be used.
Valid options are one or more of the following:
-s
--fps
Frames per second in output animation (default: 5)
-f
--flfile
Fluidity file
-z
--vizfile
The MayaVi visualization file that is to be used for all the
data-files
-r
--recycle
Recycles existing frames - doesn't overwrite
--gif
Generate animated gif (default)
--png
Generate pngs but no movie (gif or avi)
--avi
Generate AVI file
"""
return msg
def get_file (root, id):
# Check for vtk file
if os.path.isfile( root+"_%d"%id+".vtk" ):
return root+"_%d"%id+".vtk"
# Check for vtu file
if os.path.isfile( root+"_%d"%id+".vtu" ):
return root+"_%d"%id+".vtu"
# Check for pvtu file
if os.path.isfile( root+"_%d"%id+".pvtu" ):
return root+"_%d"%id+".pvtu"
raise Exception("id " + str(id) + " not found")
def main ():
try:
        opts, args = getopt.getopt(sys.argv[1:], "hf:z:rs:", ["help", "flfile=", "vizfile=", "recycle", "gif", "avi", "fps=", "png"])
except getopt.GetoptError:
# print help information and exit:
print "ERROR: Bad arguments!"
print usage()
sys.exit(2)
# Collect options.
recycle = 0
flfile = None
mvfile = None
movie = "gif"
fps = 5
maximize = False
for o, a in opts:
if o in ("-h", "--help"):
print usage()
sys.exit()
if o in ("-z", "--vizfile"):
mvfile = a
if o in ("-r", "--recycle"):
recycle = 1
if o in ("-f", "--flfile"):
flfile = a
if o == "--avi":
movie = "avi"
if o == "--png":
movie = "non"
if o in ("-s", "--fps"):
try:
fps = float(a)
assert(fps > 0.0)
except:
print "ERROR: Bad arguments!"
print usage()
sys.exit(2)
# Check for fluidity file
if not flfile:
print "ERROR: No fluidity file given."
print usage()
sys.exit()
# Check for MayaVi visualisation file
if not mvfile:
# Guess
mvfile = flfile+".mv"
if not os.path.isfile( mvfile ):
print "ERROR: No MayaVi visualisation file found."
print usage()
sys.exit()
try:
        start = int( args[0] )
except:
print "ERROR: Bad starting id %s"%args[0]
print usage()
sys.exit()
try:
        finish = int( args[1] )
except:
print "ERROR: Bad last id %s"%args[1]
print usage()
sys.exit()
    # Movie file that we'll be writing to
moviefile = flfile+"."+movie
# instantiate a MayaVi
v = mayavi.mayavi()
# load the visualisation
v.load_visualization(mvfile)
# grab the DataVizManager list
dvms = v.get_dvm_names()
# list frames to be animated
frames = ""
for id in range(start, finish+1):
# Get frame name
frame='./'+flfile+'%04d.png'%id
if (movie=="avi"):
frames = frames+","+frame
else:
frames = frames+" "+frame
# Are we recycling frames
if recycle:
if os.path.isfile(frame):
continue
# Make a vtk file
datafile = get_file (flfile, id)
# go through all the DVM's
for i in dvms:
# grab a handle to the DVM
dvm = v.mayavi.data_viz_mgr[i]
# follow the pipeline, and load in the new data file
ds = dvm.get_data_source ()
rdr = ds.get_reader ()
rdr.SetFileName (datafile)
ds.reread_file ()
v.Render ()
# write this image to the disk
v.renwin.save_png (frame)
# Bail out if no movie desired
if (movie=="non"):
return 0
# Now generate the avi file
pid = os.getpid ()
    tmpfile_name = os.getcwd()+"/.tmpmvfile%d"%pid
tmpfile = open(tmpfile_name, 'w')
tmpfile.write( "#!/bin/sh\n")
if (movie=="avi"):
tmpfile.write( "mencoder mf://"+flfile+"*.png -mf fps=" + str(fps) + ":type=png -ovc copy -o "+flfile+".avi\n")
else:
tmpfile.write( "convert -delay " + str(100.0 / fps) + " -loop 0 "+frames+" "+flfile+".gif\n" )
tmpfile.write( "rm -f %s\n"%tmpfile_name )
tmpfile.close()
os.chmod(tmpfile_name, 0700)
# Generate animation
if os.fork() == 0:
os.execl( tmpfile_name, "" )
else:
os.wait()
return 0
if __name__ == "__main__":
main()
|
FluidityProject/multifluids
|
scripts/fl_mayavi_animate.py
|
Python
|
lgpl-2.1
| 5,950
|
[
"Mayavi",
"VTK"
] |
7181226440a52269b07ac2484cfbb92710062d57368ea709d4ea0886be53c6d9
|
#file: import_problem.py
#from __future__ import print_function, division
#import os as os
import time as time
#import pyzdde.zdde as pyz # PROBLEM
#import matplotlib.pyplot as plt # PROBLEM
#import iutils.opticsutils.foptics as fou # PROBLEM
#import iutils.plotutils.mplutils as mpu # PROBLEM
#import iutils.pyutils.genutils as gu # PROBLEM
#import iutils.pyutils.aputils as apu # PROBLEM
#import scipy as sp # NO PROBLEM
#import numpy as np # NO PROBLEM
#import mayavi as mayavi # NO PROBLEM
#import iutils.pyutils.display as du # PROBLEM SHOWS UP SOMETIME
from ctypes import WinDLL, c_int, c_double, Structure, POINTER
class DdeArrayData(Structure):
_fields_ = [( 'x', c_double), ('y', c_double), ('z', c_double),
( 'l', c_double), ('m', c_double), ('n', c_double),
('opd', c_double), ('intensity', c_double),
('Exr', c_double), ('Exi', c_double),
('Eyr', c_double), ('Eyi', c_double),
('Ezr', c_double), ('Ezi', c_double),
('wave', c_int), ('error', c_int),
('vigcode', c_int), ('want_opd', c_int)]
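# Context note (added, best-effort reading): this ctypes layout matches the
# per-ray record used by Zemax's DDE array ray tracing (the structure that
# pyzdde's array-trace support is built around): position (x, y, z),
# direction cosines (l, m, n), OPD, intensity, real/imaginary E-field
# components, and the wave/error/vigcode/want_opd control fields.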
def my_function( ):
num_rays = 441
rd = (DdeArrayData * (num_rays + 1))()
# Setup a basic ray data array for test
rd[0].opd = 0.0
rd[0].wave = 0
rd[0].error = c_int(num_rays)
rd[0].want_opd = -1
k = 0
for i in xrange(-10, 11, 1):
for j in xrange(-10, 11, 1):
k += 1
rd[k].z = i/20.0
rd[k].l = j/20.0
rd[k].intensity = 1.0
rd[k].wave = 1
start_time = time.clock()
ret = arrayTrace(rd) # Call the C function
end_time = time.clock()
print "Time before tracing: ", (start_time)*10e6, "micro seconds"
print "Ray tracing took", (end_time - start_time)*10e3, " milli seconds"
if __name__ == '__main__':
array_trace_lib = WinDLL("C:\\tmp\\ArrayTrace.dll")
arrayTrace = array_trace_lib.arrayTrace
arrayTrace.restype = c_int
arrayTrace.argtypes = [POINTER(DdeArrayData)]
my_function()
|
indranilsinharoy/zArrayTrace
|
import_problem.py
|
Python
|
mit
| 2,170
|
[
"Mayavi"
] |
6c2d6afce14f06cd93bb7ae233edc7f13ae9c3e20897a0a92f3f9c8f4fbdde83
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cPickle
import cProfile
from contextlib import contextmanager
import subprocess
import logging
import os
import passlib.utils
import re
import socket
import sys
import threading
import time
import werkzeug.utils
import zipfile
from cStringIO import StringIO
from collections import defaultdict, Hashable, Iterable, Mapping, OrderedDict
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
import traceback
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
from .parse_version import parse_version
import openerp
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode # noqa
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase, etree._Entity)
# Configure default global parser
etree.set_default_parser(etree.XMLParser(resolve_entities=False))
#----------------------------------------------------------
# Subprocesses
#----------------------------------------------------------
def find_in_path(name):
path = os.environ.get('PATH', os.defpath).split(os.pathsep)
if config.get('bin_path') and config['bin_path'] != 'None':
path.append(config['bin_path'])
return which(name, path=os.pathsep.join(path))
def _exec_pipe(prog, args, env=None):
cmd = (prog,) + args
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
close_fds = os.name=="posix"
pop = subprocess.Popen(cmd, bufsize=-1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=close_fds, env=env)
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Command `%s` not found.' % name)
return _exec_pipe(prog, args)
#----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
"""
Force the database PostgreSQL environment variables to the database
configuration of Odoo.
Note: On systems where pg_restore/pg_dump require an explicit password
(i.e. on Windows where TCP sockets are used), it is necessary to pass the
postgres user password in the PGPASSWORD environment variable or in a
special .pgpass file.
See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
"""
env = os.environ.copy()
if openerp.tools.config['db_host']:
env['PGHOST'] = openerp.tools.config['db_host']
if openerp.tools.config['db_port']:
env['PGPORT'] = str(openerp.tools.config['db_port'])
if openerp.tools.config['db_user']:
env['PGUSER'] = openerp.tools.config['db_user']
if openerp.tools.config['db_password']:
env['PGPASSWORD'] = openerp.tools.config['db_password']
return env
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
with open(os.devnull) as dn:
args2 = (prog,) + args
rc = subprocess.call(args2, env=env, stdout=dn, stderr=subprocess.STDOUT)
if rc:
raise Exception('Postgres subprocess %s error %s' % (args2, rc))
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
return _exec_pipe(prog, args, env)
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
        >>> file_open('hr/report/timesheet.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
>>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import openerp.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.join(basedir, path))
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
from cStringIO import StringIO
zfile = zipfile.ZipFile(zpath)
try:
fo = StringIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
        raise IOError('Report %r doesn\'t exist or was deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a uniqu list
Author: Christophe Simonis (christophe@tinyerp.com)
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
def reverse_enumerate(l):
"""Like enumerate but in the other sens
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return izip(xrange(len(l)-1, -1, -1), reversed(l))
def topological_sort(elems):
""" Return a list of elements sorted so that their dependencies are listed
before them in the result.
:param elems: specifies the elements to sort with their dependencies; it is
a dictionary like `{element: dependencies}` where `dependencies` is a
collection of elements that must appear before `element`. The elements
of `dependencies` are not required to appear in `elems`; they will
simply not appear in the result.
:returns: a list with the keys of `elems` sorted according to their
specification.
"""
# the algorithm is inspired by [Tarjan 1976],
# http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
result = []
visited = set()
def visit(n):
if n not in visited:
visited.add(n)
if n in elems:
# first visit all dependencies of n, then append n to result
map(visit, elems[n])
result.append(n)
map(visit, elems)
return result
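# Illustrative example (added): only keys of ``elems`` appear in the result;
# dependencies that are not themselves keys are visited but dropped.
#
#   >>> topological_sort({'a': [], 'b': ['a'], 'c': ['b']})
#   ['a', 'b', 'c']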
try:
import xlwt
# add some sanitizations to respect the excel sheet name restrictions
# as the sheet name is often translatable, can not control the input
class PatchedWorkbook(xlwt.Workbook):
def add_sheet(self, name, cell_overwrite_ok=False):
# invalid Excel character: []:*?/\
name = re.sub(r'[\[\]:*?/\\]', '', name)
# maximum size is 31 characters
name = name[:31]
return super(PatchedWorkbook, self).add_sheet(name, cell_overwrite_ok=cell_overwrite_ok)
xlwt.Workbook = PatchedWorkbook
except ImportError:
xlwt = None
class UpdateableStr(local):
""" Class that stores an updateable string (used in wizards)
"""
def __init__(self, string=''):
self.string = string
def __str__(self):
return str(self.string)
def __repr__(self):
return str(self.string)
def __nonzero__(self):
return bool(self.string)
class UpdateableDict(local):
"""Stores an updateable dict to use in wizards
"""
def __init__(self, dict=None):
if dict is None:
dict = {}
self.dict = dict
def __str__(self):
return str(self.dict)
def __repr__(self):
return str(self.dict)
def clear(self):
return self.dict.clear()
def keys(self):
return self.dict.keys()
def __setitem__(self, i, y):
self.dict.__setitem__(i, y)
def __getitem__(self, i):
return self.dict.__getitem__(i)
def copy(self):
return self.dict.copy()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
def pop(self, k, d=None):
return self.dict.pop(k, d)
def popitem(self):
return self.dict.popitem()
def setdefault(self, k, d=None):
return self.dict.setdefault(k, d)
def update(self, E, **F):
        return self.dict.update(E, **F)
def values(self):
return self.dict.values()
def get(self, k, d=None):
return self.dict.get(k, d)
def has_key(self, k):
return self.dict.has_key(k)
def items(self):
return self.dict.items()
def __cmp__(self, y):
return self.dict.__cmp__(y)
def __contains__(self, k):
return self.dict.__contains__(k)
def __delitem__(self, y):
return self.dict.__delitem__(y)
def __eq__(self, y):
return self.dict.__eq__(y)
def __ge__(self, y):
return self.dict.__ge__(y)
def __gt__(self, y):
return self.dict.__gt__(y)
def __hash__(self):
return self.dict.__hash__()
def __iter__(self):
return self.dict.__iter__()
def __le__(self, y):
return self.dict.__le__(y)
def __len__(self):
return self.dict.__len__()
def __lt__(self, y):
return self.dict.__lt__(y)
def __ne__(self, y):
return self.dict.__ne__(y)
def to_xml(s):
    return s.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
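# Illustrative example (added): a redundant country suffix collapses to the
# bare language code, anything else is preserved.
#
#   >>> get_iso_codes('es_ES')
#   'es'
#   >>> get_iso_codes('pt_BR')
#   'pt_BR'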
ALL_LANGUAGES = {
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BA': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'de_CH': u'German (CH) / Deutsch (CH)',
'el_GR': u'Greek / Ελληνικά',
'en_AU': u'English (AU)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'eu_ES': u'Basque / Euskara',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
'fr_CA': u'French (CA) / Français (CA)',
'fr_CH': u'French (CH) / Français (CH)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'ja_JP': u'Japanese / 日本語',
'ka_GE': u'Georgian / ქართული ენა',
'kab_DZ': u'Kabyle / Taqbaylit',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lo_LA': u'Lao / ພາສາລາວ',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'mk_MK': u'Macedonian / македонски јазик',
'mn_MN': u'Mongolian / монгол',
'my_MM': u'Burmese / မြန်မာဘာသာ',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Dutch (BE) / Nederlands (BE)',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
}
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
Input number : account or invoice number
Output return: the same number completed with the recursive mod10
key
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
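# Worked example (added; carries computed by hand from the codec table):
# '1' -> 9, '2' -> 9, '3' -> 4, '4' -> 3, so the check digit is (10 - 3) % 10.
#
#   >>> mod10r('1234')
#   '12347'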
def str2bool(s, default=None):
s = ustr(s).lower()
y = 'y yes 1 true t on'.split()
n = 'n no 0 false f off'.split()
if s not in (y + n):
if default is None:
raise ValueError('Use 0/1/yes/no/true/false/on/off')
return bool(default)
return s in y
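# Illustrative example (added): parsing is case-insensitive, and ``default``
# is only consulted for unrecognized input.
#
#   >>> str2bool('YES'), str2bool('off')
#   (True, False)
#   >>> str2bool('maybe', default=True)
#   True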
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,basestring):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
return result
return wrapper
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
for binding to an interface, but it could be used as basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
for ifname in [iface for iface in ifaces if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
    #   there is no tzinfo in a datetime value (e.g. when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
POSIX_TO_LDML = {
'a': 'E',
'A': 'EEEE',
'b': 'MMM',
'B': 'MMMM',
#'c': '',
'd': 'dd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'M': 'mm',
'p': 'a',
'S': 'ss',
'U': 'w',
'w': 'e',
'W': 'w',
'y': 'yy',
'Y': 'yyyy',
    # see comments above, and babel's format_datetime assumes a UTC timezone
# for naive datetime objects
#'z': 'Z',
#'Z': 'z',
}
def posix_to_ldml(fmt, locale):
""" Converts a posix/strftime pattern into an LDML date format pattern.
:param fmt: non-extended C89/C90 strftime pattern
:param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
:return: unicode
"""
buf = []
pc = False
quoted = []
for c in fmt:
# LDML date format patterns uses letters, so letters must be quoted
if not pc and c.isalpha():
quoted.append(c if c != "'" else "''")
continue
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
quoted = []
if pc:
if c == '%': # escaped percent
buf.append('%')
elif c == 'x': # date format, short seems to match
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
else: # look up format char in static mapping
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
pc = True
else:
buf.append(c)
# flush anything remaining in quoted buffer
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
return ''.join(buf)
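# Illustrative example (added; assumes the babel package is importable; the
# locale only matters for %x and %X):
#
#   >>> from babel import Locale
#   >>> posix_to_ldml('%Y-%m-%d', Locale('en'))
#   'yyyy-MM-dd'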
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
@param ``piece_maker``: function to build the pieces
from the slices (tuple,list,...)
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
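# Illustrative example (added): the last piece is shorter when ``n`` does not
# divide the length evenly.
#
#   >>> list(split_every(2, range(5)))
#   [(0, 1), (2, 3), (4,)]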
if __name__ == '__main__':
import doctest
doctest.testmod()
class upload_data_thread(threading.Thread):
def __init__(self, email, data, type):
self.args = [('email',email),('type',type),('data',data)]
super(upload_data_thread,self).__init__()
def run(self):
try:
import urllib
args = urllib.urlencode(self.args)
fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
fp.read()
fp.close()
except Exception:
pass
def upload_data(email, data, type='SURVEY'):
a = upload_data_thread(email, data, type)
a.start()
return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
    or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
    Some examples (notice that there are never quotes surrounding
    the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print d
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('openerp.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('openerp.foo.bar'):
        do_stuff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
assert isinstance(logger, basestring),\
"A logger name must be a string, got %s" % type(logger)
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
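# Illustrative example (added): ``index`` trails the last yielded element and
# ends up one past the end once the stream is exhausted.
#
#   >>> cs = CountingStream(['a', 'b'])
#   >>> cs.next(), cs.index
#   ('a', 0)
#   >>> cs.next(), cs.index
#   ('b', 1)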
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-u', '--update', '-i', '--init', '--i18n-overwrite']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
"""
An immutable mapping returning the provided value for every single key.
Useful as a default value for methods
"""
__slots__ = ['_value']
def __init__(self, val):
self._value = val
def __len__(self):
"""
defaultdict updates its length for each individually requested key, is
that really useful?
"""
return 0
def __iter__(self):
"""
same as len, defaultdict updates its iterable keyset with each key
requested, is there a point for this?
"""
return iter([])
def __getitem__(self, item):
return self._value
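# Example (illustrative): every key maps to the same value and nothing
# is stored per key, hence the constant len() and empty iteration.
# >>> cm = ConstantMapping(42)
# >>> cm['anything'], cm[object()]
# (42, 42)
# >>> len(cm), list(cm)
# (0, [])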
def dumpstacks(sig=None, frame=None):
""" Signal handler: dump a stack trace for each existing thread."""
code = []
def extract_stack(stack):
for filename, lineno, name, line in traceback.extract_stack(stack):
yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
if line:
yield " %s" % (line.strip(),)
# code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
# modified for python 2.5 compatibility
threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')})
for th in threading.enumerate()])
for threadId, stack in sys._current_frames().items():
thread_info = threads_info.get(threadId)
code.append("\n# Thread: %s (id:%s) (uid:%s)" %
(thread_info and thread_info['name'] or 'n/a',
threadId,
thread_info and thread_info['uid'] or 'n/a'))
for line in extract_stack(stack):
code.append(line)
if openerp.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
from greenlet import greenlet
for ob in gc.get_objects():
if not isinstance(ob, greenlet) or not ob:
continue
code.append("\n# Greenlet: %r" % (ob,))
for line in extract_stack(ob.gr_frame):
code.append(line)
_logger.info("\n".join(code))
def freehash(arg):
try:
return hash(arg)
except Exception:
if isinstance(arg, Mapping):
return hash(frozendict(arg))
elif isinstance(arg, Iterable):
return hash(frozenset(map(freehash, arg)))
else:
return id(arg)
class frozendict(dict):
""" An implementation of an immutable dictionary. """
def __delitem__(self, key):
raise NotImplementedError("'__delitem__' not supported on frozendict")
def __setitem__(self, key, val):
raise NotImplementedError("'__setitem__' not supported on frozendict")
def clear(self):
raise NotImplementedError("'clear' not supported on frozendict")
def pop(self, key, default=None):
raise NotImplementedError("'pop' not supported on frozendict")
def popitem(self):
raise NotImplementedError("'popitem' not supported on frozendict")
def setdefault(self, key, default=None):
raise NotImplementedError("'setdefault' not supported on frozendict")
def update(self, *args, **kwargs):
raise NotImplementedError("'update' not supported on frozendict")
def __hash__(self):
return hash(frozenset((key, freehash(val)) for key, val in self.iteritems()))
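# Example (illustrative): hashable, so usable as a dict key or set
# member; any mutation attempt raises NotImplementedError.
# >>> d = frozendict(a=1)
# >>> {d: 'cached'}[frozendict(a=1)]
# 'cached'
# >>> d['a'] = 2
# Traceback (most recent call last):
#   ...
# NotImplementedError: '__setitem__' not supported on frozendict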
class Collector(Mapping):
""" A mapping from keys to lists. This is essentially a space optimization
for ``defaultdict(list)``.
"""
__slots__ = ['_map']
def __init__(self):
self._map = {}
def add(self, key, val):
vals = self._map.setdefault(key, [])
if val not in vals:
vals.append(val)
def __getitem__(self, key):
return self._map.get(key, ())
def __iter__(self):
return iter(self._map)
def __len__(self):
return len(self._map)
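# Example (illustrative): values accumulate per key without duplicates,
# and missing keys read as an empty tuple.
# >>> c = Collector()
# >>> c.add('k', 1); c.add('k', 1); c.add('k', 2)
# >>> c['k'], c['missing']
# ([1, 2], ())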
class OrderedSet(OrderedDict):
""" A simple collection that remembers the elements insertion order. """
def __init__(self, seq=()):
super(OrderedSet, self).__init__([(x, None) for x in seq])
def add(self, elem):
self[elem] = None
def discard(self, elem):
self.pop(elem, None)
@contextmanager
def ignore(*exc):
try:
yield
except exc:
pass
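# Usage sketch for the two helpers above (illustrative):
# >>> list(OrderedSet('cabca'))    # insertion order kept, duplicates dropped
# ['c', 'a', 'b']
# >>> with ignore(KeyError):
# ...     {}['missing']            # swallowed instead of raising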
# Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'):
def html_escape(text):
return werkzeug.utils.escape(text, quote=True)
else:
def html_escape(text):
return werkzeug.utils.escape(text)
def formatLang(env, value, digits=None, grouping=True, monetary=False, dp=False, currency_obj=False):
"""
Assuming 'Account' decimal.precision=3:
formatLang(value) -> digits=2 (default)
formatLang(value, digits=4) -> digits=4
formatLang(value, dp='Account') -> digits=3
formatLang(value, digits=5, dp='Account') -> digits=5
"""
if digits is None:
digits = DEFAULT_DIGITS = 2
if dp:
decimal_precision_obj = env['decimal.precision']
digits = decimal_precision_obj.precision_get(dp)
elif currency_obj:
digits = currency_obj.decimal_places
elif (hasattr(value, '_field') and isinstance(value._field, (float_field, function_field)) and value._field.digits):
digits = value._field.digits[1]
if not digits and digits != 0:
digits = DEFAULT_DIGITS
if isinstance(value, (str, unicode)) and not value:
return ''
lang = env.user.company_id.partner_id.lang or 'en_US'
lang_objs = env['res.lang'].search([('code', '=', lang)])
if not lang_objs:
lang_objs = env['res.lang'].search([], limit=1)
lang_obj = lang_objs[0]
res = lang_obj.format('%.' + str(digits) + 'f', value, grouping=grouping, monetary=monetary)
if currency_obj and currency_obj.symbol:
if currency_obj.position == 'after':
res = '%s %s' % (res, currency_obj.symbol)
elif currency_obj and currency_obj.position == 'before':
res = '%s %s' % (currency_obj.symbol, res)
return res
def _consteq(str1, str2):
""" Constant-time string comparison. Suitable to compare bytestrings of fixed,
known length only, because length difference is optimized. """
return len(str1) == len(str2) and sum(ord(x)^ord(y) for x, y in zip(str1, str2)) == 0
consteq = getattr(passlib.utils, 'consteq', _consteq)
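# Example (illustrative): compare two equal-length secrets without a
# data-dependent early exit.
# >>> consteq('secret-token', 'secret-token')
# True
# >>> consteq('secret-token', 'secret-tokem')
# False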
class Pickle(object):
@classmethod
def load(cls, stream, errors=False):
unpickler = cPickle.Unpickler(stream)
# pickle builtins: str/unicode, int/long, float, bool, tuple, list, dict, None
unpickler.find_global = None
try:
return unpickler.load()
except Exception:
_logger.warning('Failed unpickling data, returning default: %r', errors, exc_info=True)
return errors
@classmethod
def loads(cls, text):
return cls.load(StringIO(text))
dumps = cPickle.dumps
dump = cPickle.dump
pickle = Pickle
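# Usage sketch (illustrative): a well-formed payload round-trips, while
# a malformed one falls back to the `errors` default instead of raising.
# >>> pickle.loads(pickle.dumps({'a': 1}))
# {'a': 1}
# >>> pickle.loads('not a pickle')
# False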
|
be-cloud-be/horizon-addons
|
server/openerp/tools/misc.py
|
Python
|
agpl-3.0
| 40,141
|
[
"VisIt"
] |
825a1af5ab0e07ba27353c2108aab9232fd41333c55cffa794ba2b36aa21cafd
|
"""Gaussian process-based minimization algorithms."""
import numpy as np
from sklearn.utils import check_random_state
from .base import base_minimize
from ..utils import cook_estimator
from ..utils import normalize_dimensions
def gp_minimize(func, dimensions, base_estimator=None,
n_calls=100, n_random_starts=None,
n_initial_points=10,
initial_point_generator="random",
acq_func="gp_hedge", acq_optimizer="auto", x0=None, y0=None,
random_state=None, verbose=False, callback=None,
n_points=10000, n_restarts_optimizer=5, xi=0.01, kappa=1.96,
noise="gaussian", n_jobs=1, model_queue_size=None):
"""Bayesian optimization using Gaussian Processes.
If every function evaluation is expensive, for instance
when the parameters are the hyperparameters of a neural network
and the function evaluation is the mean cross-validation score across
ten folds, optimizing the hyperparameters by standard optimization
routines would take forever!
The idea is to approximate the function using a Gaussian process.
In other words, the function values are assumed to follow a multivariate
Gaussian. The covariance of the function values is given by a GP kernel
between the parameters. The next parameter to evaluate can then be chosen
by optimizing an acquisition function over this Gaussian prior, which is
much quicker to evaluate than the objective itself.
The total number of evaluations, `n_calls`, are performed like the
following. If `x0` is provided but not `y0`, then the elements of `x0`
are first evaluated, followed by `n_initial_points` evaluations.
Finally, `n_calls - len(x0) - n_initial_points` evaluations are
made guided by the surrogate model. If `x0` and `y0` are both
provided then `n_initial_points` evaluations are first made then
`n_calls - n_initial_points` subsequent evaluations are made
guided by the surrogate model.
The first `n_initial_points` are generated by the
`initial_point_generator`.
Parameters
----------
func : callable
Function to minimize. Should take a single list of parameters
and return the objective value.
If you have a search-space where all dimensions have names,
then you can use :func:`skopt.utils.use_named_args` as a decorator
on your objective function, in order to call it directly
with the named arguments. See `use_named_args` for an example.
dimensions : list, shape (n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
.. note:: The upper and lower bounds are inclusive for `Integer`
dimensions.
base_estimator : a Gaussian process estimator
The Gaussian process estimator to use for optimization.
By default, a Matern kernel is used with the following
hyperparameters tuned.
- All the length scales of the Matern kernel.
- The covariance amplitude that each element is multiplied with.
- Noise that is added to the matern kernel. The noise is assumed
to be iid gaussian.
n_calls : int, default: 100
Number of calls to `func`.
n_random_starts : int, default: None
Number of evaluations of `func` with random points before
approximating it with `base_estimator`.
.. deprecated:: 0.8
use `n_initial_points` instead.
n_initial_points : int, default: 10
Number of evaluations of `func` with initialization points
before approximating it with `base_estimator`. Initial point
generator can be changed by setting `initial_point_generator`.
initial_point_generator : str, InitialPointGenerator instance, \
default: 'random'
Sets an initial point generator. Can be either
- `"random"` for uniform random numbers,
- `"sobol"` for a Sobol' sequence,
- `"halton"` for a Halton sequence,
- `"hammersly"` for a Hammersly sequence,
- `"lhs"` for a latin hypercube sequence,
acq_func : string, default: `"gp_hedge"`
Function to minimize over the gaussian prior. Can be either
- `"LCB"` for lower confidence bound.
- `"EI"` for negative expected improvement.
- `"PI"` for negative probability of improvement.
- `"gp_hedge"` Probabilistically choose one of the above three
acquisition functions at every iteration. The weightage
given to these gains can be set by :math:`\\eta` through
`acq_func_kwargs`.
- The gains `g_i` are initialized to zero.
- At every iteration,
- Each acquisition function is optimised independently to
propose a candidate point `X_i`.
- Out of all these candidate points, the next point `X_best` is
chosen by :math:`softmax(\\eta g_i)`
- After fitting the surrogate model with `(X_best, y_best)`,
the gains are updated such that :math:`g_i -= \\mu(X_i)`
- `"EIps"` for negated expected improvement per second to take into
account the function compute time. Then, the objective function is
assumed to return two values, the first being the objective value and
the second being the time taken in seconds.
- `"PIps"` for negated probability of improvement per second. The
return type of the objective function is assumed to be similar to
that of `"EIps"`
acq_optimizer : string, `"sampling"`, `"lbfgs"` or `"auto"`, default: `"auto"`
Method to minimize the acquisition function. The fit model
is updated with the optimal value obtained by optimizing `acq_func`
with `acq_optimizer`.
The `acq_func` is computed at `n_points` sampled randomly.
- If set to `"auto"`, then `acq_optimizer` is configured on the
basis of the space searched over.
If the space is Categorical then this is set to be `"sampling"`.
- If set to `"sampling"`, then the point among these `n_points`
where the `acq_func` is minimum is the next candidate minimum.
- If set to `"lbfgs"`, then
- The `n_restarts_optimizer` points at which the acquisition
  function value is lowest are taken as start points.
- `"lbfgs"` is run for 20 iterations with these points as initial
points to find local minima.
- The optimal of these local minima is used to update the prior.
x0 : list, list of lists or `None`
Initial input points.
- If it is a list of lists, use it as a list of input points.
- If it is a list, use it as a single initial input point.
- If it is `None`, no initial input points are used.
y0 : list, scalar or `None`
Evaluation of initial input points.
- If it is a list, then it corresponds to evaluations of the function
at each element of `x0` : the i-th element of `y0` corresponds
to the function evaluated at the i-th element of `x0`.
- If it is a scalar, then it corresponds to the evaluation of the
function at `x0`.
- If it is None and `x0` is provided, then the function is evaluated
at each element of `x0`.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
verbose : boolean, default: False
Control the verbosity. It is advised to set the verbosity to True
for long optimization runs.
callback : callable, list of callables, optional
If callable then `callback(res)` is called after each call to `func`.
If list of callables, then each callable in the list is called.
n_points : int, default: 10000
Number of points to sample to determine the next "best" point.
Useless if acq_optimizer is set to `"lbfgs"`.
n_restarts_optimizer : int, default: 5
The number of restarts of the optimizer when `acq_optimizer`
is `"lbfgs"`.
kappa : float, default: 1.96
Controls how much of the variance in the predicted values should be
taken into account. If set to be very high, then we are favouring
exploration over exploitation and vice versa.
Used when the acquisition is `"LCB"`.
xi : float, default: 0.01
Controls how much improvement one wants over the previous best
values. Used when the acquisition is either `"EI"` or `"PI"`.
noise : float or `"gaussian"`, default: `"gaussian"`
- Use noise="gaussian" if the objective returns noisy observations.
The noise of each observation is assumed to be iid with
mean zero and a fixed variance.
- If the variance is known before-hand, this can be set directly
to the variance of the noise.
- Set this to a value close to zero (1e-10) if the function is
noise-free. Setting to zero might cause stability issues.
n_jobs : int, default: 1
Number of cores to run in parallel while running the lbfgs
optimizations over the acquisition function. Valid only
when `acq_optimizer` is set to `"lbfgs"`.
Defaults to 1 core. If `n_jobs=-1`, then number of jobs is set
to number of cores.
model_queue_size : int or None, default: None
Keeps list of models only as long as the argument given. In the
case of None, the list has no capped length.
Returns
-------
res : `OptimizeResult`, scipy object
The optimization result returned as an OptimizeResult object.
Important attributes are:
- `x` [list]: location of the minimum.
- `fun` [float]: function value at the minimum.
- `models`: surrogate models used for each iteration.
- `x_iters` [list of lists]: location of function evaluation for each
iteration.
- `func_vals` [array]: function value for each iteration.
- `space` [Space]: the optimization space.
- `specs` [dict]: the call specifications.
- `rng` [RandomState instance]: State of the random state
at the end of minimization.
For more details related to the OptimizeResult object, refer
http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html
.. seealso:: functions :class:`skopt.forest_minimize`,
:class:`skopt.dummy_minimize`, :class:`skopt.gbrt_minimize`
"""
# Check params
rng = check_random_state(random_state)
space = normalize_dimensions(dimensions)
if base_estimator is None:
base_estimator = cook_estimator(
"GP", space=space, random_state=rng.randint(0, np.iinfo(np.int32).max),
noise=noise)
return base_minimize(
func, space, base_estimator=base_estimator,
acq_func=acq_func,
xi=xi, kappa=kappa, acq_optimizer=acq_optimizer, n_calls=n_calls,
n_points=n_points, n_random_starts=n_random_starts,
n_initial_points=n_initial_points,
initial_point_generator=initial_point_generator,
n_restarts_optimizer=n_restarts_optimizer,
x0=x0, y0=y0, random_state=rng, verbose=verbose,
callback=callback, n_jobs=n_jobs, model_queue_size=model_queue_size)
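# A minimal, hypothetical demo (not part of the module; run it with
# ``python -m skopt.optimizer.gp`` since this file uses relative imports):
# minimize a one-dimensional quadratic whose true optimum is x = 2.
if __name__ == "__main__":
    res = gp_minimize(lambda x: (x[0] - 2.0) ** 2,  # objective to minimize
                      [(-5.0, 5.0)],                # a single Real dimension
                      n_calls=15, random_state=0)
    print(res.x, res.fun)                           # res.x should be near [2.0]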
|
scikit-optimize/scikit-optimize
|
skopt/optimizer/gp.py
|
Python
|
bsd-3-clause
| 11,787
|
[
"Gaussian"
] |
20e4816ae03357945c90bce23299005197e33391c928e946de8995f2959e429f
|
#!/usr/bin/python -tt
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
def dummy_wrapper(str):
'''
Dummy Translation wrapper, just returning the same string.
'''
return to_unicode(str)
def dummyP_wrapper(str1, str2, n):
'''
Dummy Plural Translation wrapper, just returning the singular or plural
string.
'''
if n == 1:
return str1
else:
return str2
# This is ported from ustr_utf8_* which I got from:
# http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
# I've tried to leave it close to the original C (same names etc.) so that
# it is easy to read/compare both versions...
# ----------------------------- BEG utf8 -----------------------------
# This is an implementation of wcwidth() and wcswidth() (defined in
# IEEE Std 1002.1-2001) for Unicode.
#
# http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html
# http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html
#
# In fixed-width output devices, Latin characters all occupy a single
# "cell" position of equal width, whereas ideographic CJK characters
# occupy two such cells. Interoperability between terminal-line
# applications and (teletype-style) character terminals using the
# UTF-8 encoding requires agreement on which character should advance
# the cursor by how many cell positions. No established formal
# standards exist at present on which Unicode character shall occupy
# how many cell positions on character terminals. These routines are
# a first attempt of defining such behavior based on simple rules
# applied to data provided by the Unicode Consortium.
#
# [...]
#
# Markus Kuhn -- 2007-05-26 (Unicode 5.0)
#
# Permission to use, copy, modify, and distribute this software
# for any purpose and without fee is hereby granted. The author
# disclaims all warranties with regard to this software.
#
# Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
def __utf8_bisearch(ucs, table):
""" auxiliary function for binary search in interval table. """
min = 0
max = len(table) - 1
if ucs < table[min][0] or ucs > table[max][1]:
return False
while max >= min:
mid = (min + max) / 2
if ucs > table[mid][1]:
min = mid + 1
elif ucs < table[mid][0]:
max = mid - 1
else:
return True
return False
# sorted list of non-overlapping intervals of non-spacing characters
# generated by "uniset +cat=Me +cat=Mn +cat=Cf -00AD +1160-11FF +200B c"
__combining = (
( 0x0300, 0x036F ), ( 0x0483, 0x0486 ), ( 0x0488, 0x0489 ),
( 0x0591, 0x05BD ), ( 0x05BF, 0x05BF ), ( 0x05C1, 0x05C2 ),
( 0x05C4, 0x05C5 ), ( 0x05C7, 0x05C7 ), ( 0x0600, 0x0603 ),
( 0x0610, 0x0615 ), ( 0x064B, 0x065E ), ( 0x0670, 0x0670 ),
( 0x06D6, 0x06E4 ), ( 0x06E7, 0x06E8 ), ( 0x06EA, 0x06ED ),
( 0x070F, 0x070F ), ( 0x0711, 0x0711 ), ( 0x0730, 0x074A ),
( 0x07A6, 0x07B0 ), ( 0x07EB, 0x07F3 ), ( 0x0901, 0x0902 ),
( 0x093C, 0x093C ), ( 0x0941, 0x0948 ), ( 0x094D, 0x094D ),
( 0x0951, 0x0954 ), ( 0x0962, 0x0963 ), ( 0x0981, 0x0981 ),
( 0x09BC, 0x09BC ), ( 0x09C1, 0x09C4 ), ( 0x09CD, 0x09CD ),
( 0x09E2, 0x09E3 ), ( 0x0A01, 0x0A02 ), ( 0x0A3C, 0x0A3C ),
( 0x0A41, 0x0A42 ), ( 0x0A47, 0x0A48 ), ( 0x0A4B, 0x0A4D ),
( 0x0A70, 0x0A71 ), ( 0x0A81, 0x0A82 ), ( 0x0ABC, 0x0ABC ),
( 0x0AC1, 0x0AC5 ), ( 0x0AC7, 0x0AC8 ), ( 0x0ACD, 0x0ACD ),
( 0x0AE2, 0x0AE3 ), ( 0x0B01, 0x0B01 ), ( 0x0B3C, 0x0B3C ),
( 0x0B3F, 0x0B3F ), ( 0x0B41, 0x0B43 ), ( 0x0B4D, 0x0B4D ),
( 0x0B56, 0x0B56 ), ( 0x0B82, 0x0B82 ), ( 0x0BC0, 0x0BC0 ),
( 0x0BCD, 0x0BCD ), ( 0x0C3E, 0x0C40 ), ( 0x0C46, 0x0C48 ),
( 0x0C4A, 0x0C4D ), ( 0x0C55, 0x0C56 ), ( 0x0CBC, 0x0CBC ),
( 0x0CBF, 0x0CBF ), ( 0x0CC6, 0x0CC6 ), ( 0x0CCC, 0x0CCD ),
( 0x0CE2, 0x0CE3 ), ( 0x0D41, 0x0D43 ), ( 0x0D4D, 0x0D4D ),
( 0x0DCA, 0x0DCA ), ( 0x0DD2, 0x0DD4 ), ( 0x0DD6, 0x0DD6 ),
( 0x0E31, 0x0E31 ), ( 0x0E34, 0x0E3A ), ( 0x0E47, 0x0E4E ),
( 0x0EB1, 0x0EB1 ), ( 0x0EB4, 0x0EB9 ), ( 0x0EBB, 0x0EBC ),
( 0x0EC8, 0x0ECD ), ( 0x0F18, 0x0F19 ), ( 0x0F35, 0x0F35 ),
( 0x0F37, 0x0F37 ), ( 0x0F39, 0x0F39 ), ( 0x0F71, 0x0F7E ),
( 0x0F80, 0x0F84 ), ( 0x0F86, 0x0F87 ), ( 0x0F90, 0x0F97 ),
( 0x0F99, 0x0FBC ), ( 0x0FC6, 0x0FC6 ), ( 0x102D, 0x1030 ),
( 0x1032, 0x1032 ), ( 0x1036, 0x1037 ), ( 0x1039, 0x1039 ),
( 0x1058, 0x1059 ), ( 0x1160, 0x11FF ), ( 0x135F, 0x135F ),
( 0x1712, 0x1714 ), ( 0x1732, 0x1734 ), ( 0x1752, 0x1753 ),
( 0x1772, 0x1773 ), ( 0x17B4, 0x17B5 ), ( 0x17B7, 0x17BD ),
( 0x17C6, 0x17C6 ), ( 0x17C9, 0x17D3 ), ( 0x17DD, 0x17DD ),
( 0x180B, 0x180D ), ( 0x18A9, 0x18A9 ), ( 0x1920, 0x1922 ),
( 0x1927, 0x1928 ), ( 0x1932, 0x1932 ), ( 0x1939, 0x193B ),
( 0x1A17, 0x1A18 ), ( 0x1B00, 0x1B03 ), ( 0x1B34, 0x1B34 ),
( 0x1B36, 0x1B3A ), ( 0x1B3C, 0x1B3C ), ( 0x1B42, 0x1B42 ),
( 0x1B6B, 0x1B73 ), ( 0x1DC0, 0x1DCA ), ( 0x1DFE, 0x1DFF ),
( 0x200B, 0x200F ), ( 0x202A, 0x202E ), ( 0x2060, 0x2063 ),
( 0x206A, 0x206F ), ( 0x20D0, 0x20EF ), ( 0x302A, 0x302F ),
( 0x3099, 0x309A ), ( 0xA806, 0xA806 ), ( 0xA80B, 0xA80B ),
( 0xA825, 0xA826 ), ( 0xFB1E, 0xFB1E ), ( 0xFE00, 0xFE0F ),
( 0xFE20, 0xFE23 ), ( 0xFEFF, 0xFEFF ), ( 0xFFF9, 0xFFFB ),
( 0x10A01, 0x10A03 ), ( 0x10A05, 0x10A06 ), ( 0x10A0C, 0x10A0F ),
( 0x10A38, 0x10A3A ), ( 0x10A3F, 0x10A3F ), ( 0x1D167, 0x1D169 ),
( 0x1D173, 0x1D182 ), ( 0x1D185, 0x1D18B ), ( 0x1D1AA, 0x1D1AD ),
( 0x1D242, 0x1D244 ), ( 0xE0001, 0xE0001 ), ( 0xE0020, 0xE007F ),
( 0xE0100, 0xE01EF ))
def __utf8_ucp_width(ucs):
""" Get the textual width of a ucs character. """
# test for 8-bit control characters
if ucs == 0:
return 0
if ucs < 32 or (ucs >= 0x7f and ucs < 0xa0):
return (-1)
if __utf8_bisearch(ucs, __combining):
return 0
# if we arrive here, ucs is not a combining or C0/C1 control character
return (1 +
(ucs >= 0x1100 and
(ucs <= 0x115f or # Hangul Jamo init. consonants
ucs == 0x2329 or ucs == 0x232a or
(ucs >= 0x2e80 and ucs <= 0xa4cf and
ucs != 0x303f) or # CJK ... Yi
(ucs >= 0xac00 and ucs <= 0xd7a3) or # Hangul Syllables
(ucs >= 0xf900 and ucs <= 0xfaff) or # CJK Compatibility Ideographs
(ucs >= 0xfe10 and ucs <= 0xfe19) or # Vertical forms
(ucs >= 0xfe30 and ucs <= 0xfe6f) or # CJK Compatibility Forms
(ucs >= 0xff00 and ucs <= 0xff60) or # Fullwidth Forms
(ucs >= 0xffe0 and ucs <= 0xffe6) or
(ucs >= 0x20000 and ucs <= 0x2fffd) or
(ucs >= 0x30000 and ucs <= 0x3fffd))))
def __utf8_iter_ints(msg):
for byte in to_utf8(msg):
yield ord(byte)
def __utf8_iter_ucs(msg):
uiter = __utf8_iter_ints(msg)
for byte0 in uiter:
if byte0 < 0x80: # 0xxxxxxx
yield (byte0, 1)
elif (byte0 & 0xe0) == 0xc0: # 110XXXXx 10xxxxxx
byte1 = uiter.next()
if (((byte1 & 0xc0) != 0x80) or
((byte0 & 0xfe) == 0xc0)): # overlong?
yield (None, 2)
return
yield ((((byte0 & 0x1f) << 6) | (byte1 & 0x3f)), 2)
elif (byte0 & 0xf0) == 0xe0: # 1110XXXX 10Xxxxxx 10xxxxxx
byte1 = uiter.next()
byte2 = uiter.next()
if (((byte1 & 0xc0) != 0x80) or ((byte2 & 0xc0) != 0x80) or
((byte0 == 0xe0) and ((byte1 & 0xe0) == 0x80)) or # overlong?
((byte0 == 0xed) and ((byte1 & 0xe0) == 0xa0)) or # surrogate?
((byte0 == 0xef) and (byte1 == 0xbf) and
((byte2 & 0xfe) == 0xbe))): # U+FFFE or U+FFFF?
yield (None, 3)
return
yield ((((byte0 & 0x0f) << 12) | ((byte1 & 0x3f) << 6) |
(byte2 & 0x3f)), 3)
elif (byte0 & 0xf8) == 0xf0: # 11110XXX 10XXxxxx 10xxxxxx 10xxxxxx
byte1 = uiter.next()
byte2 = uiter.next()
byte3 = uiter.next()
if (((byte1 & 0xc0) != 0x80) or
((byte2 & 0xc0) != 0x80) or
((byte3 & 0xc0) != 0x80) or
((byte0 == 0xf0) and ((byte1 & 0xf0) == 0x80)) or # overlong?
((byte0 == 0xf4) and (byte1 > 0x8f)) or # > U+10FFFF?
(byte0 > 0xf4)): # > U+10FFFF?
yield (None, 4)
return
yield ((((byte0 & 0x07) << 18) | ((byte1 & 0x3f) << 12) |
((byte2 & 0x3f) << 6) | (byte3 & 0x3f)), 4)
else:
yield (None, 1)
return
def utf8_width(msg):
""" Get the textual width of a utf8 string. """
ret = 0
for (ucs, bytes) in __utf8_iter_ucs(msg):
if ucs is None:
ret += bytes # Ugly ... should not feed bad utf8
else:
ret += __utf8_ucp_width(ucs)
return ret
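# Example (illustrative): ASCII characters are one cell wide, CJK
# ideographs two, and combining marks zero.
# >>> utf8_width(u'yum')
# 3
# >>> utf8_width(u'\u65e5\u672c')    # two double-width ideographs
# 4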
def utf8_width_chop(msg, chop=None):
""" Return the textual width of a utf8 string, chopping it to a specified
value. This is what you want to use instead of %.*s, as it does the
"right" thing with regard to utf-8 sequences. Eg.
"%.*s" % (10, msg) <= becomes => "%s" % (utf8_width_chop(msg, 10)) """
if chop is None or utf8_width(msg) <= chop:
return utf8_width(msg), msg
ret = 0
passed_unicode = isinstance(msg, unicode)
msg_bytes = 0
msg = to_utf8(msg)
for (ucs, bytes) in __utf8_iter_ucs(msg):
if ucs is None:
width = bytes # Ugly ... should not feed bad utf8
else:
width = __utf8_ucp_width(ucs)
if chop is not None and (ret + width) > chop:
msg = msg[:msg_bytes]
break
ret += width
msg_bytes += bytes
if passed_unicode:
msg = to_unicode(msg)
return ret, msg
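# Example (illustrative): chop to at most 5 display cells.
# >>> utf8_width_chop(u'hello world', 5)
# (5, u'hello')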
def utf8_width_fill(msg, fill, chop=None, left=True, prefix='', suffix=''):
""" Expand a utf8 msg to a specified "width" or chop to same.
Expansion can be left or right. This is what you want to use instead of
%*.*s, as it does the "right" thing with regard to utf-8 sequences.
prefix and suffix should be used for "invisible" bytes, like
highlighting.
Eg.
"%-*.*s" % (10, 20, msg)
<= becomes =>
"%s" % (utf8_width_fill(msg, 10, 20)).
"%20.10s" % (msg)
<= becomes =>
"%s" % (utf8_width_fill(msg, 20, 10, left=False)).
"%s%.10s%s" % (prefix, msg, suffix)
<= becomes =>
"%s" % (utf8_width_fill(msg, 0, 10, prefix=prefix, suffix=suffix)).
"""
passed_msg = msg
width, msg = utf8_width_chop(msg, chop)
if width >= fill:
if prefix or suffix:
msg = ''.join([prefix, msg, suffix])
else:
extra = " " * (fill - width)
if left:
msg = ''.join([prefix, msg, suffix, extra])
else:
msg = ''.join([extra, prefix, msg, suffix])
if isinstance(passed_msg, unicode):
return to_unicode(msg)
return msg
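# Example (illustrative): pad to 6 cells, or chop to 4 cells then pad.
# >>> utf8_width_fill(u'abc', 6) + u'|'
# u'abc   |'
# >>> utf8_width_fill(u'abcdef', 6, 4) + u'|'
# u'abcd  |'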
def utf8_valid(msg):
""" Return True/False is the text is valid utf8. """
for (ucs, bytes) in __utf8_iter_ucs(msg):
if ucs is None:
return False
return True
def _utf8_width_le(width, *args):
""" Minor speed hack, we often want to know "does X fit in Y". It takes
"a while" to work out a utf8_width() (see above), and we know that a
utf8 character is always <= byte. So given:
assert bytes >= characters
characters <= width?
...we can change to:
bytes <= width or characters <= width
...and bytes are much faster. """
# This assumes that all args. are utf8.
ret = 0
for arg in args:
ret += len(arg)
if ret <= width:
return True
ret = 0
for arg in args:
ret += utf8_width(arg)
return ret <= width
def utf8_text_wrap(text, width=70, initial_indent='', subsequent_indent=''):
""" Works like we want textwrap.wrap() to work, uses utf-8 data and
doesn't screw up lists/blocks/etc. """
# Tested with:
# yum info robodoc gpicview php-pear-Net-Socket wmctrl ustr moreutils
# mediawiki-HNP ocspd insight yum mousepad
# ...at 120, 80 and 40 chars.
# Also, notable among lots of others, searching for "\n ":
# exim-clamav, jpackage-utils, tcldom, synaptics, "quake3",
# perl-Class-Container, ez-ipupdate, perl-Net-XMPP, "kipi-plugins",
# perl-Apache-DBI, netcdf, python-configobj, "translate-toolkit", alpine,
# "udunits", "conntrack-tools"
#
# Note that, we "fail" on:
# alsa-plugins-jack, setools*, dblatex, uisp, "perl-Getopt-GUI-Long",
# suitesparse, "synce-serial", writer2latex, xenwatch, ltsp-utils
passed_unicode = isinstance(text, unicode)
def _indent_at_beg(line):
count = 0
byte = 'X'
for byte in line:
if byte != ' ':
break
count += 1
if byte not in ("-", "*", ".", "o", '\xe2'):
return count, 0
list_chr = utf8_width_chop(line[count:], 1)[1]
if list_chr in ("-", "*", ".", "o",
"\xe2\x80\xa2", "\xe2\x80\xa3", "\xe2\x88\x98"):
nxt = _indent_at_beg(line[count+len(list_chr):])
nxt = nxt[1] or nxt[0]
if nxt:
return count, count + 1 + nxt
return count, 0
initial_indent = to_utf8(initial_indent)
subsequent_indent = to_utf8(subsequent_indent)
text = to_utf8(text).rstrip('\n')
lines = to_utf8(text).replace('\t', ' ' * 8).split('\n')
ret = []
indent = initial_indent
wrap_last = False
csab = 0
cspc_indent = 0
for line in lines:
line = line.rstrip(' ')
(lsab, lspc_indent) = (csab, cspc_indent)
(csab, cspc_indent) = _indent_at_beg(line)
force_nl = False # We want to stop wrapping under "certain" conditions:
if wrap_last and cspc_indent: # if line starts a list or
force_nl = True
if wrap_last and csab == len(line):# is empty line
force_nl = True
if wrap_last and not lspc_indent: # if line doesn't continue a list and
if csab >= 4 and csab != lsab: # is "block indented"
force_nl = True
if force_nl:
ret.append(indent.rstrip(' '))
indent = subsequent_indent
wrap_last = False
if csab == len(line): # empty line, remove spaces to make it easier.
line = ''
if wrap_last:
line = line.lstrip(' ')
cspc_indent = lspc_indent
if _utf8_width_le(width, indent, line):
wrap_last = False
ret.append(indent + line)
indent = subsequent_indent
continue
wrap_last = True
words = line.split(' ')
line = indent
spcs = cspc_indent
if not spcs and csab >= 4:
spcs = csab
for word in words:
if (not _utf8_width_le(width, line, word) and
utf8_width(line) > utf8_width(subsequent_indent)):
ret.append(line.rstrip(' '))
line = subsequent_indent + ' ' * spcs
line += word
line += ' '
indent = line.rstrip(' ') + ' '
if wrap_last:
ret.append(indent.rstrip(' '))
if passed_unicode:
return map(to_unicode, ret)
return ret
def utf8_text_fill(text, *args, **kwargs):
""" Works like we want textwrap.fill() to work, uses utf-8 data and
doesn't screw up lists/blocks/etc. """
return '\n'.join(utf8_text_wrap(text, *args, **kwargs))
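# Example (illustrative): wrap at 20 columns with a hanging indent.
# >>> print utf8_text_fill("one two three four five", width=20,
# ...                      subsequent_indent="  ")
# one two three four
#   five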
# ----------------------------- END utf8 -----------------------------
def to_unicode(obj, encoding='utf-8', errors='replace'):
''' convert a 'str' to 'unicode' '''
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding, errors)
return obj
def to_utf8(obj, errors='replace'):
'''convert 'unicode' to an encoded utf-8 byte string '''
if isinstance(obj, unicode):
obj = obj.encode('utf-8', errors)
return obj
# Don't use this, to_unicode should just work now
def to_unicode_maybe(obj, encoding='utf-8', errors='replace'):
''' Don't ask don't tell, only use when you must '''
try:
return to_unicode(obj, encoding, errors)
except UnicodeEncodeError:
return obj
def to_str(obj):
""" Convert something to a string, if it isn't one. """
# NOTE: unicode counts as a string just fine. We just want objects to call
# their __str__ methods.
if not isinstance(obj, basestring):
obj = str(obj)
return obj
def str_eq(a, b):
""" convert between unicode and not and compare them, w/o warning or being annoying"""
if isinstance(a, unicode) == isinstance(b, unicode):
if a == b: # stupid python...
return True
elif to_utf8(a) == to_utf8(b):
return True
return False
def exception2msg(e):
"""Convert an exception to a message. This function will convert
the exception using to_unicode, unicode, or str, whichever works correctly.
:param e: an exception
:return: a string representation of the exception
"""
# DIE python DIE! Which one works:
# to_unicode(e.value); unicode(e); str(e);
# Call this so you don't have to care.
try:
return to_unicode(e.value)
except:
pass
try:
return unicode(e)
except:
pass
try:
return to_unicode(str(e))
except:
pass
return "<exception failed to convert to text>"
try:
'''
Set up the yum translation domain and make the _() and P_() translation
wrappers available, using ugettext so that translated strings are in
Unicode.
'''
import gettext
t = gettext.translation('yum', fallback=True)
_ = t.ugettext
P_ = t.ungettext
# we describe yum commands and options with unicode but optparse
# mixes this with non-unicode translations so "yum --help" may fail.
# It's much easier to fix this in optparse than in yum. BZ 1033416
import optparse
if optparse._ is gettext.gettext:
optparse._ = gettext.translation('messages', fallback=True).ugettext
except:
'''
Something went wrong, so we install dummy _() and P_() wrappers that
just return the text unchanged.
'''
_ = dummy_wrapper
P_ = dummyP_wrapper
if __name__ == "__main__":
import sys
def out(arg):
arg = to_utf8(arg)
print "UTF8 :", arg
print "len :", len(arg)
arg = to_unicode(arg)
print "USC :", arg
print "len :", len(arg)
print "valid:", utf8_valid(arg)
print "width:", utf8_width(arg)
print "4.8 :", "%s%s%s" % ('<', utf8_width_fill(arg, 4, 8), '>')
print "4.3 :", "%s%s%s" % ('<', utf8_width_fill(arg, 4, 3), '>')
print "4.2 :", "%s%s%s" % ('<', utf8_width_fill(arg, 4, 2), '>')
print "4.1 :", "%s%s%s" % ('<', utf8_width_fill(arg, 4, 1), '>')
print "3.3 :", "%s%s%s" % ('<', utf8_width_fill(arg, 3, 3), '>')
print "3.2 :", "%s%s%s" % ('<', utf8_width_fill(arg, 3, 2), '>')
print "3.1 :", "%s%s%s" % ('<', utf8_width_fill(arg, 3, 1), '>')
print "40.79:", "%s%s%s" % ('<', utf8_width_fill(arg, 40, 79), '>')
print "40.20:", "%s%s%s" % ('<', utf8_width_fill(arg, 40, 20), '>')
print ''
print " ---- Arguments/str ---- "
for arg in sys.argv[1:]:
out(arg)
print " ---- Arguments/gettext ---- "
for arg in sys.argv[1:]:
try:
arg = _(arg)
except UnicodeDecodeError:
continue
out(arg)
if len(sys.argv) > 2:
print " ---- Arguments/str/all ---- "
out(sys.argv[1] % sys.argv[2:])
print " ---- Arguments/gettext/all ---- "
try:
arg = _(sys.argv[1]) % map(_, sys.argv[2:])
except UnicodeDecodeError:
sys.exit(0)
out(arg)
|
rpm-software-management/yum
|
yum/i18n.py
|
Python
|
gpl-2.0
| 20,860
|
[
"NetCDF"
] |
9eef4bf1e24ea695f0bb7e44e03d21268d7dd927c8a2564bf68809afe125ef41
|
import json
import datetime
import logging
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
class atom_event_worker(osv.osv):
_name = 'atom.event.worker'
_auto = False
def _create_customer(self, vals):
ref = vals.get("ref")
name = vals.get("name")
local_name = vals.get("local_name")
village = vals.get("village")
customer = {'ref': ref, 'name': name, 'local_name': local_name, 'village': village}
return customer
def _create_sale_orderline(self,cr,uid,name, product_id, so, uom_obj,external_order_line_id,context):
stored_prod_ids = self.pool.get('product.product').search(cr, uid, [('uuid', '=', product_id)], context=context)
if(len(stored_prod_ids) > 0):
prod_id = stored_prod_ids[0]
prod_obj = self.pool.get('product.product').browse(cr, uid, prod_id)
sale_order_line = {'product_id': prod_id, 'price_unit': prod_obj.list_price, 'product_uom_qty': 1,
'product_uom': uom_obj, 'order_id': so,
'name': name, 'type': 'make_to_stock', 'state': 'draft', 'product_dosage': '0',
'product_number_of_days': '0','external_id':external_order_line_id}
self.pool.get('sale.order.line').create(cr, uid, sale_order_line, context=context)
def _create_sale_order(self, context, cr, cus_id, name, external_id, orders, shop_id, uid, uom_obj):
order = orders[0]
sale_order_group = {'group_id': order['visitId'], 'description': order['description'], 'type': order['type'] }
sale_order_group_ids = self.pool.get('visit').search(cr, uid, [('group_id', '=', order['visitId'])], context=context)
if(len(sale_order_group_ids) == 0):
sog_id = self.pool.get('visit').create(cr, uid, sale_order_group, context=context)
else:
sog_id = sale_order_group_ids[0]
sale_order = {'partner_id': cus_id, 'name': name, 'date': datetime.date.today(), 'shop_id': shop_id,
'partner_invoice_id': cus_id, 'partner_shipping_id': cus_id,
'order_policy': 'manual', 'pricelist_id': 1, 'external_id': external_id, 'group_id': sog_id,'group_description':order['description'] }
if(len(orders) > 0):
so_id = self.pool.get('sale.order').create(cr, uid, sale_order, context=context)
for order in orders:
if(len(order['productIds']) > 0):
self._create_sale_orderline(cr,uid,name, order['productIds'][0], so_id, uom_obj,order.get('id'),context)
def _update_sale_order(self, context, cr, uid, cus_id, name, external_id,shop_id, uom_obj,order_id,orders):
prod_order_Map ={}
group_prod_ids = []
deleted_prod_ids = []
for order in orders:
if(order["voided"]):
deleted_prod_ids += order['productIds']
else:
group_prod_ids = group_prod_ids + order['productIds']
for prodId in order['productIds']:
prod_order_Map[prodId] = order.get('id')
sale_order = self.pool.get('sale.order').browse(cr,uid,order_id)
if(sale_order.state != 'draft'):
raise osv.except_osv(('Error!'),("Sale order is already approved"))
for order_line in sale_order.order_line :
prod_obj = order_line.product_id
ids = [order_line.id]
if prod_obj.uuid in group_prod_ids:
group_prod_ids.remove(prod_obj.uuid)
else :
if prod_obj.uuid in deleted_prod_ids:
self.pool.get('sale.order.line').unlink(cr,uid,ids)
for prod_id in group_prod_ids:
self._create_sale_orderline(cr,uid,name, prod_id, sale_order.id, uom_obj,prod_order_Map[prod_id],context)
def _create_orders(self, cr,uid,vals,context):
customer_id = vals.get("customer_id")
if(vals.get("orders")==None):
return ""
orders_string = vals.get("orders")
order_group = json.loads(orders_string)
order_group_id = order_group.get('id')
orders = order_group.get('openERPOrders')
if(len(orders) == 0):
return ""
group_prod_ids = []
uom_obj = self.pool.get('product.uom').search(cr, uid, [('name', '=', 'Unit(s)')], context=context)[0]
customer_ids = self.pool.get('res.partner').search(cr, uid, [('ref', '=', customer_id)], context=context)
if(len(customer_ids) > 0):
cus_id = self.pool.get('res.partner').search(cr, uid, [('ref', '=', customer_id)], context=context)[0]
shop_id = self.pool.get('sale.shop').search(cr, uid, [('name', '=', 'Pharmacy')], context=context)[0]
name = self.pool.get('ir.sequence').get(cr, uid, 'sale.order')
sale_order_ids = self.pool.get('sale.order').search(cr, uid, [('external_id', '=', order_group_id)], context=context)
if(len(sale_order_ids) == 0) :
self._create_sale_order(context, cr, cus_id, name, order_group_id,orders,shop_id, uid, uom_obj)
else:
self._update_sale_order(context, cr, uid,cus_id, name, order_group_id,shop_id, uom_obj,sale_order_ids[0],orders)
else:
raise osv.except_osv(('Error!'),("Patient Id not found in openerp"))
def _update_marker(self, cr, feed_uri_for_last_read_entry, last_read_entry_id, marker_ids, uid):
for marker_id in marker_ids:
marker = self.pool.get('atom.feed.marker')
marker._update_marker(cr,uid,marker_id,last_read_entry_id, feed_uri_for_last_read_entry)
def _create_marker(self, cr, feed_uri_for_last_read_entry, last_read_entry_id, uid,feed_uri):
marker = {'feed_uri': feed_uri, 'last_read_entry_id': last_read_entry_id,
'feed_uri_for_last_read_entry': feed_uri_for_last_read_entry}
self.pool.get('atom.feed.marker').create(cr, uid, marker)
def _create_or_update_marker(self, cr, uid, vals):
is_failed_event = vals.get('is_failed_event',False)
if(is_failed_event): return
last_read_entry_id = vals.get('last_read_entry_id')
feed_uri_for_last_read_entry = vals.get('feed_uri_for_last_read_entry')
feed_uri = vals.get('feed_uri')
# Rohan/Mujir - do not update markers for failed events (failed events have empty 'feed_uri_for_last_read_entry')
# check for missing values first, so the substring tests cannot raise TypeError on None
if not feed_uri or not feed_uri_for_last_read_entry or "$param" in feed_uri_for_last_read_entry or "$param" in feed_uri:
    return
marker_ids = self.pool.get('atom.feed.marker').search(cr, uid, [('feed_uri', '=', feed_uri)], limit=1)
if len(marker_ids) > 0:
self._update_marker(cr, feed_uri_for_last_read_entry, last_read_entry_id, marker_ids, uid)
else:
self._create_marker(cr, feed_uri_for_last_read_entry, last_read_entry_id, uid,feed_uri)
def _create_or_update_customer(self,cr, patient_ref, uid, vals,context):
customer = self._create_customer(vals)
existing_customer_ids = self.pool.get('res.partner').search(cr, uid, [('ref', '=', patient_ref)])
if len(existing_customer_ids) > 0:
self.pool.get('res.partner').write(cr, uid, existing_customer_ids[0], customer, context=context)
self._create_or_update_person_attributes(cr, uid, existing_customer_ids[0], vals, context=context)
self._create_or_update_person_address(cr, uid, existing_customer_ids[0], vals, context=context)
else:
cust_id = self.pool.get('res.partner').create(cr, uid, customer, context=context)
self._create_or_update_person_attributes(cr, uid, cust_id, vals, context=context)
self._create_or_update_person_address(cr, uid, cust_id, vals, context=context)
def _create_or_update_person_attributes(self, cr, uid, cust_id, vals, context=None):
attributes = json.loads(vals.get("attributes", "{}"))
for key in attributes:
attribute_id = self.pool.get('res.partner.attributes').search(cr, uid, [('name', '=', key), ('partner_id' , '=', cust_id)])
column_dict = {'name': key, 'value': attributes[key], 'partner_id': cust_id}
if len(attribute_id) > 0:
self.pool.get('res.partner.attributes').write(cr, uid, attribute_id, column_dict, context=context)
else:
self.pool.get('res.partner.attributes').create(cr, uid, column_dict, context=context)
def _create_or_update_person_address(self, cr, uid, cust_id, vals, context=None):
try:
address = json.loads(vals.get("preferredAddress", "{}"))
except ValueError:
raise ValueError("Could not retrive preferred address from the String - %s" % str(vals))
existing_address = self.pool.get('res.partner.address').search(cr, uid, [('partner_id' , '=', cust_id)])
if not address and not existing_address:
return
column_dict = {
'address1': address['address1'],
'address2': address['address2'],
'city_village': address['cityVillage'],
'state_province': address['stateProvince'],
'country': address['country'],
'county_district': address['countyDistrict'],
'address3': address['address3'],
'partner_id': cust_id
}
if len(existing_address) > 0:
self.pool.get('res.partner.address').write(cr, uid, existing_address, column_dict, context=context)
else:
self.pool.get('res.partner.address').create(cr, uid, column_dict, context=context)
def process_event(self, cr, uid, vals,context=None):
_logger.info("vals")
_logger.info(vals)
category = vals.get("category")
patient_ref = vals.get("ref")
if(category == "create.customer"):
self._create_or_update_customer( cr, patient_ref, uid, vals,context)
if(category == "create.sale.order"):
sale_order = self._create_orders(cr,uid,vals,context)
if(category == "create.lab.test"):
self.pool.get('lab.test.service').create_or_update_labtest(cr,uid,vals,context)
if(category == "create.drug"):
self.pool.get('drug.service').create_or_update_drug(cr,uid,vals,context)
if(category == "create.drug.category"):
self.pool.get('drug.service').create_or_update_drug_category(cr,uid,vals,context)
if(category == "create.drug.uom"):
self.pool.get('product.uom.service').create_or_update_product_uom(cr,uid,vals,context)
if(category == "create.drug.uom.category"):
self.pool.get('product.uom.service').create_or_update_product_uom_category(cr,uid,vals,context)
self._create_or_update_marker(cr, uid, vals)
return {'success': True}
class atom_feed_marker(osv.osv):
_name = 'atom.feed.marker'
_table = 'markers'
def _update_marker(self,cr,uid,marker_id,last_read_entry_id,feed_uri_for_last_read_entry):
# marker = self.pool.get('atom.feed.marker').browse(marker_id)
self.write(cr, uid, marker_id, {'last_read_entry_id': last_read_entry_id,'feed_uri_for_last_read_entry': feed_uri_for_last_read_entry,})
_columns = {
    'feed_uri': fields.char("Feed URI", size=250, translate=True, required=True),
    'last_read_entry_id': fields.char("Last Read Entry Id", size=250, translate=True, required=True),
    'feed_uri_for_last_read_entry': fields.char("Feed URI for Last Read Entry", size=100, translate=True, required=True),
}
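# Illustrative payload for atom_event_worker.process_event() (every field
# value below is made up; the categories are the ones handled above):
# vals = {
#     'category': 'create.customer',
#     'ref': 'PAT-0001',
#     'name': 'John Doe',
#     'local_name': 'John Doe',
#     'village': 'Some Village',
#     'attributes': '{"caste": "unknown"}',
#     'preferredAddress': '{"address1": "...", "cityVillage": "...", ...}',
#     'feed_uri': 'http://host/openmrs/ws/atomfeed/patient/recent',
#     'last_read_entry_id': 'tag:atomfeed.ict4h.org:example-entry',
#     'feed_uri_for_last_read_entry': 'http://host/openmrs/ws/atomfeed/patient/12',
# }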
|
3dfxsoftware/cbss-addons
|
bahmni_atom_feed/atom_feed_client.py
|
Python
|
gpl-2.0
| 11,817
|
[
"VisIt"
] |
58a62ce2133c0150d12d2decab3188e77fa7dd4a724b7746d5c06ea077638ffc
|
class ASTVisitor():
def visit(self, astnode):
'A read-only function which looks at a single AST node.'
pass
def return_value(self):
return None
class ASTNode(object):
def __init__(self):
self.parent = None
self._children = []
@property
def children(self):
return self._children
@children.setter
def children(self, children):
self._children = children
for child in children:
child.parent = self
def pprint(self, indent=''):
'''Recursively prints a formatted string representation of the AST.'''
print(indent + self.__class__.__name__)
for child in self._children:
child.pprint(indent + " ")
def walk(self, visitor):
'''Traverses an AST, calling visitor.visit() on every node.
This is a depth-first, pre-order traversal. Parents will be visited before
any children, children will be visited in order, and (by extension) a node's
children will all be visited before its siblings.
The visitor may modify attributes, but may not add or delete nodes.'''
visitor.visit(self)
for child in self._children:
child.walk(visitor)
return visitor.return_value()
def mod_walk(self, mod_visitor):
'''Traverses an AST, building up a return value from visitor methods.
Similar to walk(), but constructs a return value from the result of
postvisit() calls. This can be used to modify an AST by building up the
desired new AST with return values.'''
selfval = mod_visitor.visit(self)
child_values = [child.mod_walk(mod_visitor) for child in self.children]
retval = mod_visitor.post_visit(self, selfval, child_values)
return retval
class ASTProgram(ASTNode):
def __init__(self, statements):
super().__init__()
self.children = statements
class ASTImport(ASTNode):
def __init__(self, mod):
super().__init__()
self.mod = mod
@property
def module(self):
return self.mod
class ASTComponent(ASTNode):
def __init__(self, name, statements):
super().__init__()
self.children = [name] + statements
@property
def name(self):
return self.children[0]
@property
def expressions(self):
return self.children[1:]
class ASTInputExpr(ASTNode):
def __init__(self, declarations):
super().__init__()
self.children = declarations
class ASTOutputExpr(ASTNode): # TODO
def __init__(self, declarations):
super().__init__()
self.children = declarations
class ASTAssignmentExpr(ASTNode): # TODO
def __init__(self, name, expression):
super().__init__()
self.children = [name] + [expression]
@property
def binding(self): # TODO
return self.children[0]
@property
def value(self): # TODO
return self.children[1:]
class ASTEvalExpr(ASTNode): # TODO
def __init__(self, op, parameters):
super().__init__()
self.children = [op] + parameters
@property
def op(self):
return self.children[0]
@property
def args(self):
return self.children[1:]
# These are already complete.
class ASTID(ASTNode):
def __init__(self, name, typedecl=None):
super().__init__()
self.name = name
self.type = typedecl
class ASTLiteral(ASTNode):
def __init__(self, value):
super().__init__()
self.value = value
self.type = 'Scalar'
class ASTModVisitor(ASTVisitor):
'''A visitor class that can also construct a new, modified AST.
Two methods are offered: the normal visit() method, which focuses on analyzing
and/or modifying a single node; and the post_visit() method, which allows you
to modify the child list of a node.
The default implementation does nothing; it simply builds up itself, unmodified.'''
def visit(self, astnode):
# Note that this overrides the super's implementation, because we need a
# non-None return value.
return astnode
def post_visit(self, astnode, visit_value, child_values):
'''A function which constructs a return value out of its children.
This can be used to modify an AST by returning a different or modified
ASTNode than the original. The top-level return value will then be the
new AST.'''
return visit_value
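# A small usage sketch (the counter class below is hypothetical, built
# only on the read-only visitor API above): count nodes in a tiny tree.
class _NodeCounter(ASTVisitor):
    def __init__(self):
        self.count = 0

    def visit(self, astnode):
        self.count += 1          # called once per node, pre-order

    def return_value(self):
        return self.count

# >>> ASTProgram([ASTLiteral(1)]).walk(_NodeCounter())
# 2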
|
mc-hammertimeseries/cs207project
|
pype/ast.py
|
Python
|
mit
| 4,504
|
[
"VisIt"
] |
adcd3660455bd9a300272816993e34d66616f2f098101e916c0966c900d5f005
|
# MIT License
#
# Copyright (c) 2016 Bing Huang and Anders S. Christensen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import scipy.spatial.distance as ssd
import itertools as itl
import numpy as np
from .fslatm import fget_sbot
from .fslatm import fget_sbot_local
from .fslatm import fget_sbop
from .fslatm import fget_sbop_local
def update_m(obj, ia, rcut=9.0, pbc=None):
"""
retrieve the local structure around atom `ia`
for periodic systems (or very large systems)
"""
zs, coords, c = obj
v1, v2, v3 = c
vs = np.linalg.norm(c, axis=0)  # lengths of the three cell vectors
nns = []
for i,vi in enumerate(vs):
n1_double = rcut/vi
n1 = int(n1_double)
if n1 - n1_double == 0:
n1s = range(-n1, n1+1) if pbc[i] else [0,]
elif n1 == 0:
n1s = [-1,0,1] if pbc[i] else [0,]
else:
n1s = range(-n1-1, n1+2) if pbc[i] else [0,]
nns.append(n1s)
n1s,n2s,n3s = nns
n123s_ = np.array( list( itl.product(n1s,n2s,n3s) ) )
n123s = []
for n123 in n123s_:
n123u = list(n123)
if n123u != [0,0,0]: n123s.append(n123u)
nau = len(n123s)
n123s = np.array(n123s, np.float)
na = len(zs)
cia = coords[ia]
if na == 1:
ds = np.array([[0.]])
else:
ds = ssd.squareform( ssd.pdist(coords) )
zs_u = []; coords_u = []
zs_u.append( zs[ia] ); coords_u.append( coords[ia] )
for i in range(na) :
di = ds[i,ia]
if (di > 0) and (di <= rcut):
zs_u.append(zs[i]); coords_u.append(coords[i])
# add new coords by translation
ts = np.zeros((nau,3))
for iau in range(nau):
ts[iau] = np.dot(n123s[iau],c)
coords_iu = coords[i] + ts #np.dot(n123s, c)
dsi = np.linalg.norm(coords_iu - cia, axis=1)
filt = np.logical_and(dsi > 0, dsi <= rcut); nx = filt.sum()
zs_u += [zs[i],]*nx
coords_u += [ list( coords_iu[filt,:] ), ]
obj_u = [zs_u, coords_u]
return obj_u
def get_boa(z1, zs_):
return z1*np.array( [(zs_ == z1).sum(), ])
#return -0.5*z1**2.4*np.array( [(zs_ == z1).sum(), ])
def get_sbop(mbtype, obj, iloc=False, ia=None, normalize=True, sigma=0.05, \
rcut=4.8, dgrid=0.03, pbc='000', rpower=6):
"""
two-body terms
:param obj: molecule object, consisting of two parts: [ zs, coords ]
:type obj: list
"""
z1, z2 = mbtype
zs, coords, c = obj
if iloc:
assert ia is not None, '#ERROR: plz specify `za and `ia '
if pbc != '000':
if rcut < 9.0: raise ValueError('#ERROR: rcut too small for systems with pbc')
assert iloc, '#ERROR: for periodic system, plz use atomic rpst'
zs, coords = update_m(obj, ia, rcut=rcut, pbc=pbc)
# after update of `m, the query atom `ia will become the first atom
ia = 0
# bop potential distribution
r0 = 0.1
nx = int((rcut - r0)/dgrid) + 1
coeff = 1/np.sqrt(2*sigma**2*np.pi) if normalize else 1.0
if iloc:
ys = fget_sbop_local(coords, zs, ia, z1, z2, rcut, nx, dgrid, sigma, coeff, rpower)
else:
ys = fget_sbop(coords, zs, z1, z2, rcut, nx, dgrid, sigma, coeff, rpower)
return ys
def get_sbot(mbtype, obj, iloc=False, ia=None, normalize=True, sigma=0.05, \
rcut=4.8, dgrid=0.0262, pbc='000'):
"""
sigma -- standard deviation of gaussian distribution centered on a specific angle
defaults to 0.05 (rad), approximately 3 degree
dgrid -- step of angle grid
defaults to 0.0262 (rad), approximately 1.5 degree
"""
z1, z2, z3 = mbtype
zs, coords, c = obj
if iloc:
assert ia is not None, '#ERROR: plz specify `za and `ia '
if pbc != '000':
assert iloc, '#ERROR: for periodic system, plz use atomic rpst'
zs, coords = update_m(obj, ia, rcut=rcut, pbc=pbc)
# after update of `m, the query atom `ia will become the first atom
ia = 0
# for a normalized gaussian distribution, u should multiply this coeff
coeff = 1/np.sqrt(2*sigma**2*np.pi) if normalize else 1.0
# Setup grid in Python
d2r = np.pi/180 # degree to rad
a0 = -20.0*d2r
a1 = np.pi + 20.0*d2r
nx = int((a1-a0)/dgrid) + 1
if iloc:
ys = fget_sbot_local(coords, zs, ia, z1, z2, z3, rcut, nx, dgrid, sigma, coeff)
else:
ys = fget_sbot(coords, zs, z1, z2, z3, rcut, nx, dgrid, sigma, coeff)
return ys
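# A hedged usage sketch (the geometry is illustrative and this assumes the
# compiled fslatm extension is importable; run via ``python -m qml.slatm``
# because of the relative imports): two- and three-body SLATM spectra for
# a water-like molecule, without periodicity.
if __name__ == "__main__":
    zs = np.array([8, 1, 1])
    coords = np.array([[0.00, 0.00, 0.0],
                       [0.96, 0.00, 0.0],
                       [-0.24, 0.93, 0.0]])
    c = np.eye(3) * 20.0                    # dummy cell; unused when pbc='000'
    mol = [zs, coords, c]
    print(get_sbop((1, 8), mol).shape)      # O-H two-body spectrum
    print(get_sbot((1, 8, 1), mol).shape)   # H-O-H three-body spectrum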
|
qmlcode/qml
|
qml/slatm.py
|
Python
|
mit
| 5,503
|
[
"Gaussian"
] |
bda4101af33ff4841ee40206151a45f825fedef7b5a51fe3a82a33684107d091
|
__author__ = 'USER'
import random
from common import math_util
class NeuralNetwork:
@property
def output_matrix(self):
return self._output_matrix
@output_matrix.setter
def output_matrix(self, __output_matrix):
self._output_matrix = __output_matrix
@output_matrix.deleter
def output_matrix(self):
del self._output_matrix
@property
def delta_matrix(self):
return self._delta_matrix
@delta_matrix.setter
def delta_matrix(self, _delta_matrix):
self._delta_matrix = _delta_matrix
@delta_matrix.deleter
def delta_matrix(self):
del self._delta_matrix
@property
def weight_matrix(self):
return self._weight_matrix
@weight_matrix.setter
def weight_matrix(self, _weight_matrix):
self._weight_matrix = _weight_matrix
@weight_matrix.deleter
def weight_matrix(self):
del self._weight_matrix
@property
def number_of_layers(self):
return self._number_of_layers
@number_of_layers.setter
def number_of_layers(self, _number_of_layers):
self._number_of_layers = _number_of_layers
@number_of_layers.deleter
def number_of_layers(self):
del self._number_of_layers
@property
def size_of_layers(self):
return self._size_of_layers
@size_of_layers.setter
def size_of_layers(self, _size_of_layers):
self._size_of_layers = _size_of_layers
@size_of_layers.deleter
def size_of_layers(self):
del self._size_of_layers
@property
def learning_rate(self):
return self._learning_rate
@learning_rate.setter
def learning_rate(self, _learning_rate):
self._learning_rate = _learning_rate
@learning_rate.deleter
def learning_rate(self):
del self._learning_rate
@property
def momentum(self):
return self._momentum
@momentum.setter
def momentum(self, _momentum):
self._momentum = _momentum
@momentum.deleter
def momentum(self):
del self._momentum
@property
def prev_delta_weight(self):
return self._prev_delta_weight
@prev_delta_weight.setter
def prev_delta_weight(self, _prev_delta_weight):
self._prev_delta_weight = _prev_delta_weight
@prev_delta_weight.deleter
def prev_delta_weight(self):
del self._prev_delta_weight
def __init__(self, _number_of_layers, _size_array, _learning_rate=0.3, _momentum=0.1):
"""
Initialize the network
:param _number_of_layers: The number of layers of the network.
:param _size_array: The array includes each sizes of layers.
:param _learning_rate: The learning rate of the network. Default value is 0.3
:param _momentum: The momentum parameter of the network. Default value is 0.1
:return:
"""
self._number_of_layers = _number_of_layers
self._learning_rate = _learning_rate
self._momentum = _momentum
self._delta_matrix = []
self._weight_matrix = []
self._prev_delta_weight = []
self._size_of_layers = _size_array
# self.size_of_layers = _size_array
# Initially assign the memory space needed. output_matrix, delta_matrix => 2-dimensional list space. weight_matrix, prev_delta_weight => 3-dimensional list space.
self.output_matrix = [[0 for j in range(_size_array[i])] for i in range(0, _number_of_layers)]
self.delta_matrix = [[0 for j in range(_size_array[i])] for i in range(1, _number_of_layers)]
self.weight_matrix = [ [[0 for k in range(_size_array[i-1]+1)] for j in range(_size_array[i])] for i in range(1, _number_of_layers) ]
self.prev_delta_weight = [[[0 for k in range(_size_array[i-1]+1)] for j in range(_size_array[i])] for i in range(1, _number_of_layers)]
link_vacancy = None
self.delta_matrix = [link_vacancy] + self.delta_matrix
self.weight_matrix = [link_vacancy] + self.weight_matrix
self.prev_delta_weight = [link_vacancy] + self.prev_delta_weight
for i in range(1, _number_of_layers):
for j in range(0, _size_array[i]):
for k in range(0, _size_array[i-1]+1):
self.weight_matrix[i][j][k] = random.random()
self.prev_delta_weight[i][j][k] = 0.0
def feedforward(self, input_list):
"""
Feed forward activations for one set of inputs.
:param input_list: Input data for network.
:return: Void
"""
        # Layer 0 of the output matrix holds the input data.
for i in range(0, len(input_list)):
self.output_matrix[0][i] = input_list[i]
# Apply activation value to each neuron using sigmoid function.
for i in range(1, self.number_of_layers):
for j in range(0, self.size_of_layers[i]):
multiple_sum = 0
for k in range(0, self.size_of_layers[i-1]):
multiple_sum += self.output_matrix[i-1][k] * self.weight_matrix[i][j][k] # Apply weight to inputs and add to sum.
multiple_sum += self.weight_matrix[i][j][self.size_of_layers[i-1]] # Apply bias.
self.output_matrix[i][j] = math_util.sigmoid(multiple_sum) # Apply sigmoid function.
def backpropagate(self, input_list, target_list):
"""
Back propagate error for one set of input.
:param input_list: Input data for network.
        :param target_list: Target output values for the network.
        :return: Void
"""
self.feedforward(input_list) # Update output value for each neuron.
# Find delta for output layer.
for i in range(0, self.size_of_layers[self.number_of_layers-1]):
self.delta_matrix[self.number_of_layers-1][i] = self.output_matrix[self.number_of_layers-1][i] * (1-self.output_matrix[self.number_of_layers-1][i]) * (target_list[i] - self.output_matrix[self.number_of_layers-1][i])
# Find delta for hidden layer.
for i in range(self.number_of_layers-2, 0, -1):
for j in range(0, self.size_of_layers[i]):
multiple_sum = 0
for k in range(0, self.size_of_layers[i+1]):
multiple_sum += self.delta_matrix[i+1][k] * self.weight_matrix[i+1][k][j]
self.delta_matrix[i][j] = self.output_matrix[i][j] * (1-self.output_matrix[i][j]) * multiple_sum
# Iteration for weight matrix.
for i in range(1, self.number_of_layers):
for j in range(0, self.size_of_layers[i]):
for k in range(0, self.size_of_layers[i-1]):
# Apply momentum.
self.weight_matrix[i][j][k] += self.momentum * self.prev_delta_weight[i][j][k]
# Apply momentum of bias to weight matrix.
self.weight_matrix[i][j][self.size_of_layers[i-1]] += self.momentum * self.prev_delta_weight[i][j][self.size_of_layers[i-1]]
# Iteration for previous delta.
for i in range(1, self.number_of_layers):
for j in range(0, self.size_of_layers[i]):
for k in range(0, self.size_of_layers[i-1]):
# Apply weights using steepest descent.
self.prev_delta_weight[i][j][k] = self.learning_rate * self.delta_matrix[i][j] * self.output_matrix[i-1][k]
self.weight_matrix[i][j][k] += self.prev_delta_weight[i][j][k]
# Apply learning rate
self.prev_delta_weight[i][j][self.size_of_layers[i-1]] = self.learning_rate * self.delta_matrix[i][j]
self.weight_matrix[i][j][self.size_of_layers[i-1]] += self.prev_delta_weight[i][j][self.size_of_layers[i-1]]
def mean_squared_error(self, target_list):
"""
Mean squared error for the network.
:param target_list: Target list to compare.
:return: Mean squared error.
"""
mse = 0
for i in range(0, self.size_of_layers[self.number_of_layers-1]):
mse += (target_list[i]-self.output(i)) * (target_list[i] - self.output(i))
return mse / 2
def output(self, index):
"""
Return values of the output layer.
:param index: Specific index of output value.
:return: Value of the output.
"""
return self.output_matrix[self.number_of_layers-1][index]
pass
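# --- Editor's note: a minimal usage sketch, not part of the original module.
# It assumes `common.math_util.sigmoid` (imported at the top of this file) is
# available on the path. Training XOR with a 2-2-1 topology might look like:
#
#     net = NeuralNetwork(_number_of_layers=3, _size_array=[2, 2, 1],
#                         _learning_rate=0.5, _momentum=0.1)
#     samples = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
#     for epoch in range(10000):
#         for inputs, target in samples:
#             net.backpropagate(inputs, target)
#     for inputs, target in samples:
#         net.feedforward(inputs)
#         print(inputs, "->", round(net.output(0), 3), "target:", target)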
|
ParkJinSang/Logle
|
learning/mlp/neuralnetwork.py
|
Python
|
mit
| 8,413
|
[
"NEURON"
] |
358358048eb2bde7d13c3b41512855740367fe54cdb8c89173e50658d908bd3d
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Votca(CMakePackage):
"""VOTCA is a software package which focuses on the analysis of molecular
dynamics data, the development of systematic coarse-graining techniques
as well as methods used for simulating microscopic charge (and exciton)
transport in disordered semiconductors.
"""
homepage = "https://www.votca.org"
url = "https://github.com/votca/votca/tarball/v2022-rc.1"
git = "https://github.com/votca/xtp.git"
maintainers = ['junghans']
version('master', branch='master')
version('stable', branch='stable')
version('2022-rc.2', sha256='eefde51470ec1437d0127fb02c2745f33e434deff53cdaee97691c36ce447fb1')
version('2022-rc.1', sha256='d53ca9fde364a97d91bf3bed15223536ffa598b2dec7bccd459accae265391b1')
variant('mkl', default=False, description='Build with MKL support')
variant('new-gmx', default=False, description='Build against gromacs>2019 - no tabulated kernels')
conflicts('votca-tools')
conflicts('votca-csg')
conflicts('votca-xtp')
depends_on("cmake@3.13:", type='build')
depends_on("expat")
depends_on("fftw-api@3")
depends_on("eigen@3.3:")
depends_on("boost")
depends_on('mkl', when='+mkl')
depends_on("libxc")
depends_on("hdf5+cxx~mpi")
depends_on("libint@2.6.0:")
depends_on("libecpint")
depends_on("py-h5py")
depends_on("py-lxml")
depends_on("gromacs~mpi@5.1:")
depends_on("gromacs~mpi@5.1:2019", when="~new-gmx")
depends_on('lammps', type='test')
depends_on('py-espresso', type='test')
depends_on('py-pytest', type='test')
def cmake_args(self):
args = [
'-DINSTALL_RC_FILES=OFF',
'-DBUILD_XTP=ON',
'-DBUILD_CSGAPPS=ON',
]
if '~mkl' in self.spec:
args.append('-DCMAKE_DISABLE_FIND_PACKAGE_MKL=ON')
if self.run_tests:
args.append('-DENABLE_TESTING=ON')
args.append('-DENABLE_REGRESSION_TESTING=ON')
return args
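# --- Editor's note: an illustration, not part of the Spack package. Under the
# assumption of a spec like `votca@2022-rc.2 ~mkl` built with tests enabled,
# cmake_args() above would return:
#
#     ['-DINSTALL_RC_FILES=OFF', '-DBUILD_XTP=ON', '-DBUILD_CSGAPPS=ON',
#      '-DCMAKE_DISABLE_FIND_PACKAGE_MKL=ON',
#      '-DENABLE_TESTING=ON', '-DENABLE_REGRESSION_TESTING=ON']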
|
LLNL/spack
|
var/spack/repos/builtin/packages/votca/package.py
|
Python
|
lgpl-2.1
| 2,236
|
[
"ESPResSo",
"Gromacs",
"LAMMPS"
] |
f86a489ea415d3700e51b9ca27aaca2b3066c30bc183f5f1242ef7b26d87a303
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***************************************
**ExtAnalyze** - Integrator Extension
***************************************
This class can be used to execute nearly all analysis objects
within the main integration loop, which makes it possible to
accumulate time averages automatically (with standard-deviation error bars).
Example Usage:
-----------------
>>> pt = espressopp.analysis.PressureTensor(system)
>>> extension_pt = espressopp.integrator.ExtAnalyze(pt , interval=100)
>>> integrator.addExtension(extension_pt)
>>> integrator.run(10000)
>>>
>>> pt_ave = pt.getAverageValue()
>>> print "average Pressure Tensor = ", pt_ave[:6]
>>> print " std deviation = ", pt_ave[6:]
>>> print "number of measurements = ", pt.getNumberOfMeasurements()
.. function:: espressopp.integrator.ExtAnalyze(action_obj, interval)
:param action_obj: the analysis object to execute every `interval` steps
:param interval: (default: 1)
:type action_obj: espressopp.analysis object
:type interval: int
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_ExtAnalyze
class ExtAnalyzeLocal(ExtensionLocal, integrator_ExtAnalyze):
def __init__(self, action_obj, interval=1):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_ExtAnalyze, action_obj, interval)
if pmi.isController:
class ExtAnalyze(Extension):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.integrator.ExtAnalyzeLocal',
)
|
junghans/espressopp
|
src/integrator/ExtAnalyze.py
|
Python
|
gpl-3.0
| 2,446
|
[
"ESPResSo"
] |
9a16497b3016e03b5f8eb02126cb11a35db9d0a68be4a4d91d31e64920a0d97b
|
#!/usr/bin/env python
#
# $File: PyQuanTrait.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
import random
pop = sim.Population(size=5000, loci=2, infoFields=['qtrait1', 'qtrait2', 'age'])
pop.setVirtualSplitter(sim.InfoSplitter(field='age', cutoff=[40]))
def qtrait(geno, age):
    'Return two traits that depend on genotype and age'
return random.normalvariate(age * sum(geno), 10), random.randint(0, 10*sum(geno))
pop.evolve(
initOps=[
sim.InitSex(),
sim.InitGenotype(freq=[0.2, 0.8]),
],
matingScheme=sim.RandomMating(),
postOps=[
# use random age for simplicity
sim.InitInfo(lambda:random.randint(20, 75), infoFields='age'),
sim.PyQuanTrait(loci=(0,1), func=qtrait, infoFields=['qtrait1', 'qtrait2']),
sim.Stat(meanOfInfo=['qtrait1'], subPops=[(0, sim.ALL_AVAIL)],
vars='meanOfInfo_sp'),
sim.PyEval(r"'Mean of trait1: %.3f (age < 40), %.3f (age >=40)\n' % "
"(subPop[(0,0)]['meanOfInfo']['qtrait1'], subPop[(0,1)]['meanOfInfo']['qtrait1'])"),
],
gen = 5
)
|
BoPeng/simuPOP
|
docs/PyQuanTrait.py
|
Python
|
gpl-2.0
| 2,087
|
[
"VisIt"
] |
3d2358e216d6ce65c6a8855c93fb7df9ee89060688f88de218a6659286b03cd6
|
from ase.structure import molecule
from gpaw import GPAW
a = 8.0
h = 0.2
energies = {}
resultfile = open('results-%.2f.txt' % h, 'w')
for name in ['H2O', 'H', 'O']:
system = molecule(name)
system.set_cell((a, a, a))
system.center()
calc = GPAW(h=h,
txt='gpaw-%s-%.2f.txt' % (name, h))
if name == 'H' or name == 'O':
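        # isolated atoms: occupy orbitals according to Hund's rule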
calc.set(hund=True)
system.set_calculator(calc)
energy = system.get_potential_energy()
energies[name] = energy
    print(name, energy, file=resultfile)
e_atomization = energies['H2O'] - 2 * energies['H'] - energies['O']
print(e_atomization, file=resultfile)
resultfile.close()
|
robwarm/gpaw-symm
|
doc/exercises/water/h2o.py
|
Python
|
gpl-3.0
| 631
|
[
"ASE",
"GPAW"
] |
a23254b8fcba2aa4a09287f7d49a231ab8cd32252ea3e3ee974ed86d7eb73380
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import numpy as np
from collections import OrderedDict
from psi4 import core
from psi4.driver.p4util.exceptions import *
from psi4.driver.p4util import solvers
from .sapt_util import print_sapt_var
__all__ = ["compute_sapt_sf"]
def _sf_compute_JK(jk, Cleft, Cright, rotation=None):
"""
    A specialized JK computation helper for terms that arise from SF-SAPT.
The density is computed as (Cl_mu,i rotation_ij Cr_nu,j) where the rotation
is an arbitrary perturbation on the density.
"""
# Handle both list and single value input
return_single = False
if not isinstance(Cleft, (list, tuple)):
Cleft = [Cleft]
return_single = True
if not isinstance(Cright, (list, tuple)):
Cright = [Cright]
return_single = True
if (not isinstance(rotation, (list, tuple))) and (rotation is not None):
rotation = [rotation]
return_single = True
if len(Cleft) != len(Cright):
raise ValidationError("Cleft list is not the same length as Cright list")
jk.C_clear()
zero_append = []
num_compute = 0
for num in range(len(Cleft)):
Cl = Cleft[num]
Cr = Cright[num]
if (Cr.shape[1] == 0) or (Cl.shape[1] == 0):
zero_append.append(num)
continue
if (rotation is not None) and (rotation[num] is not None):
mol = Cl.shape[1]
mor = Cr.shape[1]
if (rotation[num].shape[0] != mol) or (rotation[num].shape[1] != mor):
raise ValidationError("_sf_compute_JK: Tensor size does not match Cl (%d) /Cr (%d) : %s" %
(mol, mor, str(rotation[num].shape)))
# Figure out the small MO index to contract to
if mol < mor:
Cl = np.dot(Cl, rotation[num])
else:
Cr = np.dot(Cr, rotation[num].T)
Cl = core.Matrix.from_array(Cl)
Cr = core.Matrix.from_array(Cr)
jk.C_left_add(Cl)
jk.C_right_add(Cr)
num_compute += 1
jk.compute()
J_list = []
K_list = []
for num in range(num_compute):
J_list.append(np.array(jk.J()[num]))
K_list.append(np.array(jk.K()[num]))
jk.C_clear()
nbf = J_list[0].shape[0]
zero_mat = np.zeros((nbf, nbf))
for num in zero_append:
J_list.insert(num, zero_mat)
K_list.insert(num, zero_mat)
if return_single:
return J_list[0], K_list[0]
else:
return J_list, K_list
def _chain_dot(*dot_list):
"""
A simple chain dot function unpacked from *args.
"""
result = dot_list[0]
for x in range(len(dot_list) - 1):
result = np.dot(result, dot_list[x + 1])
return result
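# Editor's note: a quick sanity sketch (assuming plain NumPy arrays), showing
# that _chain_dot reduces to repeated matrix products:
#
#     A, B, C = (np.random.rand(4, 4) for _ in range(3))
#     assert np.allclose(_chain_dot(A, B, C), A @ B @ C)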
def compute_sapt_sf(dimer, jk, wfn_A, wfn_B, do_print=True):
"""
Computes Elst and Spin-Flip SAPT0 for ROHF wavefunctions
"""
if do_print:
core.print_out("\n ==> Preparing SF-SAPT Data Cache <== \n\n")
jk.print_header()
### Build intermediates
# Pull out Wavefunction A quantities
ndocc_A = wfn_A.doccpi().sum()
nsocc_A = wfn_A.soccpi().sum()
Cocc_A = np.asarray(wfn_A.Ca_subset("AO", "OCC"))
Ci = Cocc_A[:, :ndocc_A]
Ca = Cocc_A[:, ndocc_A:]
Pi = np.dot(Ci, Ci.T)
Pa = np.dot(Ca, Ca.T)
mints = core.MintsHelper(wfn_A.basisset())
V_A = mints.ao_potential()
# Pull out Wavefunction B quantities
ndocc_B = wfn_B.doccpi().sum()
nsocc_B = wfn_B.soccpi().sum()
Cocc_B = np.asarray(wfn_B.Ca_subset("AO", "OCC"))
Cj = Cocc_B[:, :ndocc_B]
Cb = Cocc_B[:, ndocc_B:]
Pj = np.dot(Cj, Cj.T)
Pb = np.dot(Cb, Cb.T)
mints = core.MintsHelper(wfn_B.basisset())
V_B = mints.ao_potential()
# Pull out generic quantities
S = np.asarray(wfn_A.S())
intermonomer_nuclear_repulsion = dimer.nuclear_repulsion_energy()
intermonomer_nuclear_repulsion -= wfn_A.molecule().nuclear_repulsion_energy()
intermonomer_nuclear_repulsion -= wfn_B.molecule().nuclear_repulsion_energy()
num_el_A = (2 * ndocc_A + nsocc_A)
num_el_B = (2 * ndocc_B + nsocc_B)
### Build JK Terms
if do_print:
core.print_out("\n ==> Computing required JK matrices <== \n\n")
    # Written so that we can reorganize the order to save on DF-JK cost.
pairs = [("ii", Ci, None, Ci),
("ij", Ci, _chain_dot(Ci.T, S, Cj), Cj),
("jj", Cj, None, Cj),
("aa", Ca, None, Ca),
("aj", Ca, _chain_dot(Ca.T, S, Cj), Cj),
("ib", Ci, _chain_dot(Ci.T, S, Cb), Cb),
("bb", Cb, None, Cb),
("ab", Ca, _chain_dot(Ca.T, S, Cb), Cb)]
# Reorganize
names = [x[0] for x in pairs]
Cleft = [x[1] for x in pairs]
rotations = [x[2] for x in pairs]
Cright = [x[3] for x in pairs]
tmp_J, tmp_K = _sf_compute_JK(jk, Cleft, Cright, rotations)
J = {key: val for key, val in zip(names, tmp_J)}
K = {key: val for key, val in zip(names, tmp_K)}
### Compute Terms
if do_print:
core.print_out("\n ==> Computing Spin-Flip Exchange and Electrostatics <== \n\n")
w_A = V_A + 2 * J["ii"] + J["aa"]
w_B = V_B + 2 * J["jj"] + J["bb"]
h_Aa = V_A + 2 * J["ii"] + J["aa"] - K["ii"] - K["aa"]
h_Ab = V_A + 2 * J["ii"] + J["aa"] - K["ii"]
h_Ba = V_B + 2 * J["jj"] + J["bb"] - K["jj"]
h_Bb = V_B + 2 * J["jj"] + J["bb"] - K["jj"] - K["bb"]
### Build electrostatics
# socc/socc term
two_el_repulsion = np.vdot(Pa, J["bb"])
attractive_a = np.vdot(V_A, Pb) * nsocc_A / num_el_A
attractive_b = np.vdot(V_B, Pa) * nsocc_B / num_el_B
nuclear_repulsion = intermonomer_nuclear_repulsion * nsocc_A * nsocc_B / (num_el_A * num_el_B)
elst_abab = two_el_repulsion + attractive_a + attractive_b + nuclear_repulsion
# docc/socc term
two_el_repulsion = np.vdot(Pi, J["bb"])
attractive_a = np.vdot(V_A, Pb) * ndocc_A / num_el_A
attractive_b = np.vdot(V_B, Pi) * nsocc_B / num_el_B
nuclear_repulsion = intermonomer_nuclear_repulsion * ndocc_A * nsocc_B / (num_el_A * num_el_B)
elst_ibib = 2 * (two_el_repulsion + attractive_a + attractive_b + nuclear_repulsion)
# socc/docc term
two_el_repulsion = np.vdot(Pa, J["jj"])
attractive_a = np.vdot(V_A, Pj) * nsocc_A / num_el_A
attractive_b = np.vdot(V_B, Pa) * ndocc_B / num_el_B
nuclear_repulsion = intermonomer_nuclear_repulsion * nsocc_A * ndocc_B / (num_el_A * num_el_B)
elst_jaja = 2 * (two_el_repulsion + attractive_a + attractive_b + nuclear_repulsion)
# docc/docc term
two_el_repulsion = np.vdot(Pi, J["jj"])
attractive_a = np.vdot(V_A, Pj) * ndocc_A / num_el_A
attractive_b = np.vdot(V_B, Pi) * ndocc_B / num_el_B
nuclear_repulsion = intermonomer_nuclear_repulsion * ndocc_A * ndocc_B / (num_el_A * num_el_B)
elst_ijij = 4 * (two_el_repulsion + attractive_a + attractive_b + nuclear_repulsion)
elst = elst_abab + elst_ibib + elst_jaja + elst_ijij
# print(print_sapt_var("Elst,10", elst))
### Start diagonal exchange
exch_diag = 0.0
exch_diag -= np.vdot(Pj, 2 * K["ii"] + K["aa"])
exch_diag -= np.vdot(Pb, K["ii"])
exch_diag -= np.vdot(_chain_dot(Pi, S, Pj), (h_Aa + h_Ab + h_Ba + h_Bb))
exch_diag -= np.vdot(_chain_dot(Pa, S, Pj), (h_Aa + h_Ba))
exch_diag -= np.vdot(_chain_dot(Pi, S, Pb), (h_Ab + h_Bb))
exch_diag += 2.0 * np.vdot(_chain_dot(Pj, S, Pi, S, Pb), w_A)
exch_diag += 2.0 * np.vdot(_chain_dot(Pj, S, Pi, S, Pj), w_A)
exch_diag += np.vdot(_chain_dot(Pb, S, Pi, S, Pb), w_A)
exch_diag += np.vdot(_chain_dot(Pj, S, Pa, S, Pj), w_A)
exch_diag += 2.0 * np.vdot(_chain_dot(Pi, S, Pj, S, Pi), w_B)
exch_diag += 2.0 * np.vdot(_chain_dot(Pi, S, Pj, S, Pa), w_B)
exch_diag += np.vdot(_chain_dot(Pi, S, Pb, S, Pi), w_B)
exch_diag += np.vdot(_chain_dot(Pa, S, Pj, S, Pa), w_B)
exch_diag -= 2.0 * np.vdot(_chain_dot(Pi, S, Pj), K["ij"])
exch_diag -= 2.0 * np.vdot(_chain_dot(Pa, S, Pj), K["ij"])
exch_diag -= 2.0 * np.vdot(_chain_dot(Pi, S, Pb), K["ij"])
exch_diag -= np.vdot(_chain_dot(Pa, S, Pj), K["aj"])
exch_diag -= np.vdot(_chain_dot(Pi, S, Pb), K["ib"])
# print(print_sapt_var("Exch10,offdiagonal", exch_diag))
### Start off-diagonal exchange
exch_offdiag = 0.0
exch_offdiag -= np.vdot(Pb, K["aa"])
exch_offdiag -= np.vdot(_chain_dot(Pa, S, Pb), (h_Aa + h_Bb))
exch_offdiag += np.vdot(_chain_dot(Pa, S, Pj), K["bb"])
exch_offdiag += np.vdot(_chain_dot(Pi, S, Pb), K["aa"])
exch_offdiag += 2.0 * np.vdot(_chain_dot(Pj, S, Pa, S, Pb), w_A)
exch_offdiag += np.vdot(_chain_dot(Pb, S, Pa, S, Pb), w_A)
exch_offdiag += 2.0 * np.vdot(_chain_dot(Pi, S, Pb, S, Pa), w_B)
exch_offdiag += np.vdot(_chain_dot(Pa, S, Pb, S, Pa), w_B)
exch_offdiag -= 2.0 * np.vdot(_chain_dot(Pa, S, Pb), K["ij"])
exch_offdiag -= 2.0 * np.vdot(_chain_dot(Pa, S, Pb), K["ib"])
exch_offdiag -= 2.0 * np.vdot(_chain_dot(Pa, S, Pj), K["ab"])
exch_offdiag -= 2.0 * np.vdot(_chain_dot(Pa, S, Pj), K["ib"])
exch_offdiag -= np.vdot(_chain_dot(Pa, S, Pb), K["ab"])
# print(print_sapt_var("Exch10,off-diagonal", exch_offdiag))
# print(print_sapt_var("Exch10(S^2)", exch_offdiag + exch_diag))
ret_values = OrderedDict({
"Elst10": elst,
"Exch10(S^2) [diagonal]": exch_diag,
"Exch10(S^2) [off-diagonal]": exch_offdiag,
"Exch10(S^2) [highspin]": exch_offdiag + exch_diag,
})
return ret_values
|
CDSherrill/psi4
|
psi4/driver/procrouting/sapt/sapt_sf_terms.py
|
Python
|
lgpl-3.0
| 10,386
|
[
"Psi4"
] |
8433d35c6302187b051ba3bcf83d8f662c71c2dfe7ca7f9656c1c12dee54da00
|
#################################################################
# Importing samtools, bcftools, and htslib
#
# For each package PKG:
#
# rm -rf PKG
# python import.py PKG path/to/download/PKG-X.Y
import fnmatch
import os
import re
import itertools
import shutil
import sys
import hashlib
EXCLUDE = {
"samtools": (
"test", "misc",
"razip.c",
"bgzip.c",
"main.c",
"calDepth.c",
"bam2bed.c",
"bam_tview.c",
"bam_tview.h",
"bam_tview_html.c",
"bam_tview_curses.c",
"bamcheck.c",
"chk_indel.c",
"vcf-miniview.c",
),
"bcftools": (
"test", "plugins", "peakfit.c",
"peakfit.h",
        # needs to be renamed; name conflict with samtools reheader
# "reheader.c",
"polysomy.c"),
"htslib": (
'htslib/tabix.c', 'htslib/bgzip.c',
'htslib/htsfile.c',
"test", "tests"),
}
MAIN = {
"samtools": "bamtk",
"bcftools": "main"
}
C_VERSION = {
"htslib": "HTS_VERSION_TEXT",
"samtools": "SAMTOOLS_VERSION",
"bcftools": "BCFTOOLS_VERSION"
}
def locate(pattern, root=os.curdir, exclude=[], exclude_htslib=False):
'''Locate all files matching supplied filename pattern (but not listed
in exclude) in and below the supplied root directory. Omit any files under
directories listed in exclude or (if exclude_htslib=True) matching /htslib-*/.
'''
for path, dirs, files in os.walk(os.path.abspath(root)):
for filename in fnmatch.filter(files, pattern):
if filename not in exclude:
yield os.path.join(path, filename)
for dirname in exclude:
if dirname in dirs: dirs.remove(dirname)
if exclude_htslib:
for dirname in [d for d in dirs if re.match(r"htslib-", d)]:
dirs.remove(dirname)
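# Editor's note: a brief hedged illustration (the path is hypothetical).
# locate() yields absolute paths of matching files, pruning excluded
# directories as it walks:
#
#     for path in locate("*.c", root="samtools-1.9", exclude=["test", "misc"]):
#         print(path)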
def _update_pysam_files(cf, destdir):
    '''Update pysam files, applying redirection of output.'''
basename = os.path.basename(destdir)
for filename in cf:
if not filename:
continue
dest = filename + ".pysam.c"
with open(filename, encoding="utf-8") as infile:
lines = "".join(infile.readlines())
with open(dest, "w", encoding="utf-8") as outfile:
outfile.write('#include "{}.pysam.h"\n\n'.format(basename))
subname, _ = os.path.splitext(os.path.basename(filename))
if subname in MAIN.get(basename, []):
lines = re.sub(r"int main\(", "int {}_main(".format(
basename), lines)
else:
lines = re.sub(r"int main\(", "int {}_{}_main(".format(
basename, subname), lines)
if basename == "samtools":
lines = re.sub(r"main_(reheader)\(",
r"samtools_main_\1(", lines)
lines = re.sub(r"\b({}_stdout)\b".format(basename), r"\1_internal", lines)
lines = re.sub(r"\bexit\(", "{}_exit(".format(basename), lines)
lines = re.sub(r"\bstderr\b", "{}_stderr".format(basename), lines)
lines = re.sub(r"\bstdout\b", "{}_stdout".format(basename), lines)
lines = re.sub(r" printf\(", " fprintf({}_stdout, ".format(basename), lines)
lines = re.sub(r"([^kf])puts\(", r"\1{}_puts(".format(basename), lines)
lines = re.sub(r"putchar\(([^)]+)\)",
r"fputc(\1, {}_stdout)".format(basename), lines)
fn = os.path.basename(filename)
# some specific fixes:
SPECIFIC_SUBSTITUTIONS = {
"bam_md.c": (
'sam_open_format("-", mode_w',
'sam_open_format({}_stdout_fn, mode_w'.format(basename)),
"phase.c": (
'putc("ACGT"[f->seq[j] == 1? (c&3, {}_stdout) : (c>>16&3)]);'.format(basename),
'putc("ACGT"[f->seq[j] == 1? (c&3) : (c>>16&3)], {}_stdout);'.format(basename)),
"cut_target.c": (
'putc(33 + (cns[j]>>8>>2, {}_stdout));'.format(basename),
'putc(33 + (cns[j]>>8>>2), {}_stdout);'.format(basename))
}
if fn in SPECIFIC_SUBSTITUTIONS:
lines = lines.replace(
SPECIFIC_SUBSTITUTIONS[fn][0],
SPECIFIC_SUBSTITUTIONS[fn][1])
if fn == "bamtk.c":
lines = re.sub(r'(#include "version.h")', r'\1\n#include "samtools_config_vars.h"', lines)
lines = re.sub(r'(else if.*"tview")', r'//\1', lines)
outfile.write(lines)
with open(os.path.join("import", "pysam.h")) as inf, \
open(os.path.join(destdir, "{}.pysam.h".format(basename)), "w") as outf:
outf.write(re.sub("@pysam@", basename, inf.read()))
with open(os.path.join("import", "pysam.c")) as inf, \
open(os.path.join(destdir, "{}.pysam.c".format(basename)), "w") as outf:
outf.write(re.sub("@pysam@", basename, inf.read()))
if len(sys.argv) >= 1:
if len(sys.argv) != 3:
raise ValueError("import requires dest src")
dest, srcdir = sys.argv[1:3]
if dest not in EXCLUDE:
raise ValueError("import expected one of %s" %
",".join(EXCLUDE.keys()))
exclude = EXCLUDE[dest]
destdir = os.path.abspath(dest)
srcdir = os.path.abspath(srcdir)
if not os.path.exists(srcdir):
raise IOError(
"source directory `%s` does not exist." % srcdir)
cfiles = locate("*.c", srcdir, exclude=exclude, exclude_htslib=True)
hfiles = locate("*.h", srcdir, exclude=exclude, exclude_htslib=True)
mfiles = itertools.chain(locate("README", srcdir), locate("LICENSE", srcdir),
locate("version.sh", srcdir, exclude_htslib=True))
if dest == "htslib":
# Add build files, including *.ac *.in *.mk *.m4
mfiles = itertools.chain(mfiles, locate("Makefile", srcdir),
locate("configure", srcdir),
locate("*.[aim][cnk4]", srcdir))
ncopied = 0
def _compareAndCopy(src, srcdir, destdir, exclude):
d, f = os.path.split(src)
common_prefix = os.path.commonprefix((d, srcdir))
subdir = re.sub(common_prefix, "", d)[1:]
targetdir = os.path.join(destdir, subdir)
if not os.path.exists(targetdir):
os.makedirs(targetdir)
old_file = os.path.join(targetdir, f)
if os.path.exists(old_file):
md5_old = hashlib.md5(
"".join(open(old_file, "r", encoding="utf-8").readlines()).encode()).digest()
md5_new = hashlib.md5(
"".join(open(src, "r", encoding="utf-8").readlines()).encode()).digest()
if md5_old != md5_new:
raise ValueError(
"incompatible files for %s and %s" %
(old_file, src))
shutil.copy(src, targetdir)
return old_file
for src_file in hfiles:
_compareAndCopy(src_file, srcdir, destdir, exclude)
ncopied += 1
for src_file in mfiles:
_compareAndCopy(src_file, srcdir, destdir, exclude)
ncopied += 1
cf = []
for src_file in cfiles:
cf.append(_compareAndCopy(src_file,
srcdir,
destdir,
exclude))
ncopied += 1
sys.stdout.write(
"installed latest source code from %s: "
"%i files copied\n" % (srcdir, ncopied))
if dest in MAIN:
# redirect stderr to pysamerr and replace bam.h with a stub.
sys.stdout.write("applying stderr redirection\n")
_update_pysam_files(cf, destdir)
def _getVersion(srcdir):
with open(os.path.join(srcdir, "version.sh"), encoding="utf-8") as inf:
for line in inf:
m = re.match(r"VERSION=(\S+)", line)
if m: return m.group(1)
raise ValueError("no VERSION line in version.sh")
def _update_version_file(key, value, filename):
tmpfilename = filename + ".tmp"
with open(filename, encoding="utf-8") as inf:
with open(tmpfilename, "w", encoding="utf-8") as outf:
for line in inf:
if key in line:
line = re.sub(r'"[^"]*"', '"{}"'.format(value), line)
outf.write(line)
os.rename(tmpfilename, filename)
def _update_version_doc_file(dest, value, filename):
tmpfilename = filename + ".tmp"
with open(filename, encoding="utf-8") as inf:
with open(tmpfilename, "w", encoding="utf-8") as outf:
for line in inf:
if " wraps " in line:
# hide the sentence's fullstop from the main regexp
line = re.sub(r'\.$', ',DOT', line)
line = re.sub(r'{}-[^*,]*'.format(dest),
'{}-{}'.format(dest, value), line)
line = re.sub(',DOT', '.', line)
outf.write(line)
os.rename(tmpfilename, filename)
version = _getVersion(srcdir)
_update_version_file("__{}_version__".format(dest), version, "pysam/version.py")
_update_version_file(C_VERSION[dest], version + " (pysam)", "pysam/version.h")
_update_version_doc_file(dest, version, "README.rst")
_update_version_doc_file(dest, version, "doc/index.rst")
sys.exit(0)
# if len(sys.argv) >= 2 and sys.argv[1] == "refresh":
# sys.stdout.write("refreshing latest source code from .c to .pysam.c")
# # redirect stderr to pysamerr and replace bam.h with a stub.
# sys.stdout.write("applying stderr redirection")
# for destdir in ('samtools', ):
# pysamcfiles = locate("*.pysam.c", destdir)
# for f in pysamcfiles:
# os.remove(f)
# cfiles = locate("*.c", destdir)
# _update_pysam_files(cfiles, destdir)
# sys.exit(0)
|
pysam-developers/pysam
|
devtools/import.py
|
Python
|
mit
| 10,243
|
[
"pysam"
] |
b0caedda86bb58abb984b4ae5e6c7e4ec3c02fe5dee3283dec6d81715c779b18
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Implementation of Neural Net (NN) functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.distribute import distribution_strategy_context as ds
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops # pylint: disable=unused-import
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import util as losses_util
from tensorflow.python.platform import device_context
from tensorflow.python.util import dispatch
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
@tf_export("nn.log_poisson_loss")
@dispatch.add_dispatch_support
def log_poisson_loss(targets, log_input, compute_full_loss=False, name=None):
"""Computes log Poisson loss given `log_input`.
Gives the log-likelihood loss between the prediction and the target under the
assumption that the target has a Poisson distribution.
Caveat: By default, this is not the exact loss, but the loss minus a
constant term [log(z!)]. That has no effect for optimization, but
does not play well with relative loss comparisons. To compute an
approximation of the log factorial term, specify
compute_full_loss=True to enable Stirling's Approximation.
For brevity, let `c = log(x) = log_input`, `z = targets`. The log Poisson
loss is
-log(exp(-x) * (x^z) / z!)
= -log(exp(-x) * (x^z)) + log(z!)
~ -log(exp(-x)) - log(x^z) [+ z * log(z) - z + 0.5 * log(2 * pi * z)]
[ Note the second term is the Stirling's Approximation for log(z!).
It is invariant to x and does not affect optimization, though
important for correct relative loss comparisons. It is only
computed when compute_full_loss == True. ]
= x - z * log(x) [+ z * log(z) - z + 0.5 * log(2 * pi * z)]
= exp(c) - z * c [+ z * log(z) - z + 0.5 * log(2 * pi * z)]
Args:
targets: A `Tensor` of the same type and shape as `log_input`.
log_input: A `Tensor` of type `float32` or `float64`.
compute_full_loss: whether to compute the full loss. If false, a constant
term is dropped in favor of more efficient optimization.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `log_input` with the componentwise
logistic losses.
Raises:
ValueError: If `log_input` and `targets` do not have the same shape.
"""
with ops.name_scope(name, "log_poisson_loss", [log_input, targets]) as name:
log_input = ops.convert_to_tensor(log_input, name="log_input")
targets = ops.convert_to_tensor(targets, name="targets")
try:
targets.get_shape().assert_is_compatible_with(log_input.get_shape())
except ValueError:
raise ValueError(
"log_input and targets must have the same shape (%s vs %s)" %
(log_input.get_shape(), targets.get_shape()))
result = math_ops.exp(log_input) - log_input * targets
if compute_full_loss:
# need to create constant tensors here so that their dtypes can be matched
# to that of the targets.
point_five = constant_op.constant(0.5, dtype=targets.dtype)
two_pi = constant_op.constant(2 * math.pi, dtype=targets.dtype)
stirling_approx = (targets * math_ops.log(targets)) - targets + (
point_five * math_ops.log(two_pi * targets))
zeros = array_ops.zeros_like(targets, dtype=targets.dtype)
ones = array_ops.ones_like(targets, dtype=targets.dtype)
cond = math_ops.logical_and(targets >= zeros, targets <= ones)
result += array_ops.where(cond, zeros, stirling_approx)
return result
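# Editor's note: a small numerical sketch, assuming NumPy and SciPy are
# available, checking the identity in the docstring: the partial loss
# exp(c) - z*c returned by default differs from the exact Poisson negative
# log-likelihood only by the dropped log(z!) term.
#
#     import numpy as np
#     from scipy.special import gammaln      # log(z!) == gammaln(z + 1)
#     from scipy.stats import poisson
#     z, c = 3.0, 0.7
#     partial = np.exp(c) - z * c            # what this function returns
#     full = partial + gammaln(z + 1)        # restore the dropped log(z!) term
#     assert np.isclose(full, -poisson.logpmf(z, mu=np.exp(c)))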
@tf_export(v1=["nn.sigmoid_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
def sigmoid_cross_entropy_with_logits( # pylint: disable=invalid-name
_sentinel=None,
labels=None,
logits=None,
name=None):
"""Computes sigmoid cross entropy given `logits`.
Measures the probability error in discrete classification tasks in which each
class is independent and not mutually exclusive. For instance, one could
perform multilabel classification where a picture can contain both an elephant
and a dog at the same time.
For brevity, let `x = logits`, `z = labels`. The logistic loss is
z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
= z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
= z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
  = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
= (1 - z) * x + log(1 + exp(-x))
= x - x * z + log(1 + exp(-x))
For x < 0, to avoid overflow in exp(-x), we reformulate the above
x - x * z + log(1 + exp(-x))
= log(exp(x)) - x * z + log(1 + exp(-x))
= - x * z + log(1 + exp(x))
Hence, to ensure stability and avoid overflow, the implementation uses this
equivalent formulation
max(x, 0) - x * z + log(1 + exp(-abs(x)))
`logits` and `labels` must have the same type and shape.
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: A `Tensor` of the same type and shape as `logits`.
logits: A `Tensor` of type `float32` or `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
logistic losses.
Raises:
ValueError: If `logits` and `labels` do not have the same shape.
"""
# pylint: disable=protected-access
nn_ops._ensure_xent_args("sigmoid_cross_entropy_with_logits", _sentinel,
labels, logits)
# pylint: enable=protected-access
with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
try:
labels.get_shape().assert_is_compatible_with(logits.get_shape())
except ValueError:
raise ValueError("logits and labels must have the same shape (%s vs %s)" %
(logits.get_shape(), labels.get_shape()))
# The logistic loss formula from above is
# x - x * z + log(1 + exp(-x))
# For x < 0, a more numerically stable formula is
# -x * z + log(1 + exp(x))
# Note that these two expressions can be combined into the following:
# max(x, 0) - x * z + log(1 + exp(-abs(x)))
# To allow computing gradients at zero, we define custom versions of max and
# abs functions.
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
cond = (logits >= zeros)
relu_logits = array_ops.where(cond, logits, zeros)
neg_abs_logits = array_ops.where(cond, -logits, logits)
return math_ops.add(
relu_logits - logits * labels,
math_ops.log1p(math_ops.exp(neg_abs_logits)),
name=name)
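# Editor's note: a NumPy sketch (an assumption, not part of TensorFlow)
# verifying that the stable form used above matches the naive logistic loss
# on moderate inputs:
#
#     import numpy as np
#     x = np.array([-15.0, -2.0, 0.0, 2.0, 15.0])   # logits
#     z = np.array([0.0, 1.0, 0.5, 1.0, 0.0])       # labels
#     stable = np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))
#     sig = 1 / (1 + np.exp(-x))
#     naive = -z * np.log(sig) - (1 - z) * np.log(1 - sig)
#     assert np.allclose(stable, naive)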
# Note: intentionally calling this v2 to not allow existing code with indirect
# imports to ignore the sentinel behavior.
@tf_export("nn.sigmoid_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def sigmoid_cross_entropy_with_logits_v2( # pylint: disable=invalid-name
labels=None,
logits=None,
name=None):
"""Computes sigmoid cross entropy given `logits`.
Measures the probability error in discrete classification tasks in which each
class is independent and not mutually exclusive. For instance, one could
perform multilabel classification where a picture can contain both an elephant
and a dog at the same time.
For brevity, let `x = logits`, `z = labels`. The logistic loss is
z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
= z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
= z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
  = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
= (1 - z) * x + log(1 + exp(-x))
= x - x * z + log(1 + exp(-x))
For x < 0, to avoid overflow in exp(-x), we reformulate the above
x - x * z + log(1 + exp(-x))
= log(exp(x)) - x * z + log(1 + exp(-x))
= - x * z + log(1 + exp(x))
Hence, to ensure stability and avoid overflow, the implementation uses this
equivalent formulation
max(x, 0) - x * z + log(1 + exp(-abs(x)))
`logits` and `labels` must have the same type and shape.
Args:
labels: A `Tensor` of the same type and shape as `logits`.
logits: A `Tensor` of type `float32` or `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
logistic losses.
Raises:
ValueError: If `logits` and `labels` do not have the same shape.
"""
return sigmoid_cross_entropy_with_logits(
logits=logits, labels=labels, name=name)
@tf_export("nn.weighted_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def weighted_cross_entropy_with_logits_v2(labels, logits, pos_weight,
name=None):
"""Computes a weighted cross entropy.
  This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`
allows one to trade off recall and precision by up- or down-weighting the
cost of a positive error relative to a negative error.
The usual cross-entropy cost is defined as:
labels * -log(sigmoid(logits)) +
(1 - labels) * -log(1 - sigmoid(logits))
A value `pos_weight > 1` decreases the false negative count, hence increasing
the recall.
Conversely setting `pos_weight < 1` decreases the false positive count and
increases the precision.
This can be seen from the fact that `pos_weight` is introduced as a
multiplicative coefficient for the positive labels term
in the loss expression:
labels * -log(sigmoid(logits)) * pos_weight +
(1 - labels) * -log(1 - sigmoid(logits))
For brevity, let `x = logits`, `z = labels`, `q = pos_weight`.
The loss is:
qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
= qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
= qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
  = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
= (1 - z) * x + (qz + 1 - z) * log(1 + exp(-x))
= (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))
Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow,
the implementation uses
(1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))
`logits` and `labels` must have the same type and shape.
Args:
labels: A `Tensor` of the same type and shape as `logits`.
logits: A `Tensor` of type `float32` or `float64`.
pos_weight: A coefficient to use on the positive examples.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
weighted logistic losses.
Raises:
ValueError: If `logits` and `labels` do not have the same shape.
"""
with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
try:
labels.get_shape().assert_is_compatible_with(logits.get_shape())
except ValueError:
raise ValueError("logits and labels must have the same shape (%s vs %s)" %
(logits.get_shape(), labels.get_shape()))
# The logistic loss formula from above is
# (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))
# For x < 0, a more numerically stable formula is
# (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(x)) - l * x
# To avoid branching, we use the combined version
# (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))
log_weight = 1 + (pos_weight - 1) * labels
return math_ops.add(
(1 - labels) * logits,
log_weight * (math_ops.log1p(math_ops.exp(-math_ops.abs(logits))) +
nn_ops.relu(-logits)),
name=name)
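# Editor's note: a NumPy sketch (an assumption, not part of TensorFlow)
# checking the weighted stable form used above against the definition, with
# l = 1 + (q - 1) * z:
#
#     import numpy as np
#     x, z, q = np.array([-8.0, 1.5]), np.array([1.0, 0.0]), 3.0
#     l = 1 + (q - 1) * z
#     stable = (1 - z) * x + l * (np.log1p(np.exp(-np.abs(x))) + np.maximum(-x, 0))
#     sig = 1 / (1 + np.exp(-x))
#     naive = -q * z * np.log(sig) - (1 - z) * np.log(1 - sig)
#     assert np.allclose(stable, naive)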
@tf_export(v1=["nn.weighted_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
@deprecated_args(None, "targets is deprecated, use labels instead", "targets")
def weighted_cross_entropy_with_logits(labels=None,
logits=None,
pos_weight=None,
name=None,
targets=None):
"""Computes a weighted cross entropy.
  This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`
allows one to trade off recall and precision by up- or down-weighting the
cost of a positive error relative to a negative error.
The usual cross-entropy cost is defined as:
labels * -log(sigmoid(logits)) +
(1 - labels) * -log(1 - sigmoid(logits))
A value `pos_weight > 1` decreases the false negative count, hence increasing
the recall.
Conversely setting `pos_weight < 1` decreases the false positive count and
increases the precision.
This can be seen from the fact that `pos_weight` is introduced as a
multiplicative coefficient for the positive labels term
in the loss expression:
labels * -log(sigmoid(logits)) * pos_weight +
(1 - labels) * -log(1 - sigmoid(logits))
For brevity, let `x = logits`, `z = labels`, `q = pos_weight`.
The loss is:
qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
= qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
= qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
  = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)))
= (1 - z) * x + (qz + 1 - z) * log(1 + exp(-x))
= (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))
Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow,
the implementation uses
(1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))
`logits` and `labels` must have the same type and shape.
Args:
labels: A `Tensor` of the same type and shape as `logits`.
logits: A `Tensor` of type `float32` or `float64`.
pos_weight: A coefficient to use on the positive examples.
name: A name for the operation (optional).
targets: Deprecated alias for labels.
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
weighted logistic losses.
Raises:
ValueError: If `logits` and `labels` do not have the same shape.
"""
labels = deprecated_argument_lookup("labels", labels, "targets", targets)
return weighted_cross_entropy_with_logits_v2(labels, logits, pos_weight, name)
@tf_export("nn.compute_average_loss")
@dispatch.add_dispatch_support
def compute_average_loss(per_example_loss,
sample_weight=None,
global_batch_size=None):
"""Scales per-example losses with sample_weights and computes their average.
Usage with distribution strategy and custom training loop:
```python
with strategy.scope():
def compute_loss(labels, predictions, sample_weight=None):
# If you are using a `Loss` class instead, set reduction to `NONE` so that
# we can do the reduction afterwards and divide by global batch size.
per_example_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, predictions)
# Compute loss that is scaled by sample_weight and by global batch size.
return tf.nn.compute_average_loss(
per_example_loss,
sample_weight=sample_weight,
global_batch_size=GLOBAL_BATCH_SIZE)
```
Args:
per_example_loss: Per-example loss.
sample_weight: Optional weighting for each example.
    global_batch_size: Optional global batch size value. Defaults to (size of
      first dimension of `per_example_loss`) * (number of replicas).
Returns:
Scalar loss value.
""" # pylint: disable=g-doc-exception
per_example_loss = ops.convert_to_tensor(per_example_loss)
input_dtype = per_example_loss.dtype
with losses_util.check_per_example_loss_rank(per_example_loss):
if sample_weight is not None:
sample_weight = ops.convert_to_tensor(sample_weight)
per_example_loss = losses_util.scale_losses_by_sample_weight(
per_example_loss, sample_weight)
per_example_loss = math_ops.cast(per_example_loss, input_dtype)
if global_batch_size is None:
if ds.has_strategy() and ds.in_cross_replica_context():
raise RuntimeError(
"You are calling `compute_average_loss` in cross replica context, "
"while it was expected to be called in replica context.")
num_replicas = ds.get_strategy().num_replicas_in_sync
per_replica_batch_size = array_ops.shape_v2(per_example_loss)[0]
global_batch_size = per_replica_batch_size * num_replicas
global_batch_size = math_ops.cast(global_batch_size, input_dtype)
return math_ops.reduce_sum(per_example_loss) / global_batch_size
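# Editor's note: a hedged arithmetic sketch of the scaling above. With two
# replicas and a global batch of four, each replica returns its local sum
# divided by the global batch size, so summing the replica results recovers
# the true global mean:
#
#     import numpy as np
#     replica_losses = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]
#     global_batch_size = 4
#     per_replica = [l.sum() / global_batch_size for l in replica_losses]
#     assert np.isclose(sum(per_replica), np.concatenate(replica_losses).mean())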
@tf_export("nn.scale_regularization_loss")
@dispatch.add_dispatch_support
def scale_regularization_loss(regularization_loss):
"""Scales the sum of the given regularization losses by number of replicas.
Usage with distribution strategy and custom training loop:
```python
with strategy.scope():
    def compute_loss(labels, predictions, sample_weight=None):
per_example_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, predictions)
# Compute loss that is scaled by sample_weight and by global batch size.
loss = tf.nn.compute_average_loss(
per_example_loss,
sample_weight=sample_weight,
global_batch_size=GLOBAL_BATCH_SIZE)
# Add scaled regularization losses.
loss += tf.nn.scale_regularization_loss(tf.nn.l2_loss(weights))
return loss
```
Args:
regularization_loss: Regularization loss.
Returns:
Scalar loss value.
""" # pylint: disable=g-doc-exception
if ds.has_strategy() and ds.in_cross_replica_context():
raise RuntimeError(
"You are calling `scale_regularization_loss` in cross replica context, "
"while it was expected to be called in replica context.")
num_replicas = ds.get_strategy().num_replicas_in_sync
return math_ops.reduce_sum(regularization_loss) / num_replicas
@tf_export(v1=["nn.relu_layer"])
@dispatch.add_dispatch_support
def relu_layer(x, weights, biases, name=None):
"""Computes Relu(x * weight + biases).
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"nn_relu_layer" is used.
Returns:
A 2-D Tensor computing relu(matmul(x, weights) + biases).
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "relu_layer", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
return nn_ops.relu(xw_plus_b, name=name)
@tf_export("nn.silu", "nn.swish")
@dispatch.add_dispatch_support
@custom_gradient.custom_gradient
def swish(features):
# pylint: disable=g-doc-args
"""Computes the SiLU or Swish activation function: `x * sigmoid(x)`.
The SiLU activation function was introduced in "Gaussian Error Linear Units
(GELUs)" [Hendrycks et al. 2016](https://arxiv.org/abs/1606.08415) and
"Sigmoid-Weighted Linear Units for Neural Network Function Approximation in
Reinforcement Learning"
[Elfwing et al. 2017](https://arxiv.org/abs/1702.03118) and was independently
discovered (and called swish) in "Searching for Activation Functions"
[Ramachandran et al. 2017](https://arxiv.org/abs/1710.05941)
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
# pylint: enable=g-doc-args
features = ops.convert_to_tensor(features, name="features")
def grad(dy):
"""Gradient for the Swish activation function"""
# Naively, x * tf.nn.sigmoid(x) requires keeping both x and sigmoid(x)
# around for backprop, effectively doubling the tensor's memory consumption.
# We use a control dependency here so that sigmoid(features) is re-computed
# during backprop (the control dep prevents it being de-duped with the
# forward pass) and we can free the sigmoid(features) expression immediately
# after use during the forward pass.
with ops.control_dependencies([dy]):
sigmoid_features = math_ops.sigmoid(features)
activation_grad = (
sigmoid_features * (1.0 + features * (1.0 - sigmoid_features)))
return dy * activation_grad
return features * math_ops.sigmoid(features), grad
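# Editor's note: a finite-difference sketch (plain NumPy, an assumption)
# confirming the analytic gradient used above, sigmoid(x)*(1 + x*(1 - sigmoid(x))):
#
#     import numpy as np
#     swish_np = lambda v: v / (1 + np.exp(-v))
#     x, eps = 0.73, 1e-6
#     fd = (swish_np(x + eps) - swish_np(x - eps)) / (2 * eps)
#     s = 1 / (1 + np.exp(-x))
#     assert abs(fd - s * (1 + x * (1 - s))) < 1e-8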
# pylint: disable=redefined-builtin
@tf_export("linalg.normalize")
@dispatch.add_dispatch_support
def normalize(tensor, ord="euclidean", axis=None, name=None):
"""Normalizes `tensor` along dimension `axis` using specified norm.
This uses `tf.linalg.norm` to compute the norm along `axis`.
This function can compute several different vector norms (the 1-norm, the
Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and
matrix norms (Frobenius, 1-norm, 2-norm and inf-norm).
Args:
tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`
ord: Order of the norm. Supported values are `'fro'`, `'euclidean'`, `1`,
`2`, `np.inf` and any positive real number yielding the corresponding
p-norm. Default is `'euclidean'` which is equivalent to Frobenius norm if
`tensor` is a matrix and equivalent to 2-norm for vectors.
Some restrictions apply: a) The Frobenius norm `'fro'` is not defined for
vectors, b) If axis is a 2-tuple (matrix norm), only `'euclidean'`,
'`fro'`, `1`, `2`, `np.inf` are supported. See the description of `axis`
on how to compute norms for a batch of vectors or matrices stored in a
tensor.
axis: If `axis` is `None` (the default), the input is considered a vector
and a single vector norm is computed over the entire set of values in the
tensor, i.e. `norm(tensor, ord=ord)` is equivalent to
`norm(reshape(tensor, [-1]), ord=ord)`. If `axis` is a Python integer, the
input is considered a batch of vectors, and `axis` determines the axis in
`tensor` over which to compute vector norms. If `axis` is a 2-tuple of
Python integers it is considered a batch of matrices and `axis` determines
the axes in `tensor` over which to compute a matrix norm.
Negative indices are supported. Example: If you are passing a tensor that
can be either a matrix or a batch of matrices at runtime, pass
`axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are
computed.
name: The name of the op.
Returns:
normalized: A normalized `Tensor` with the same shape as `tensor`.
norm: The computed norms with the same shape and dtype `tensor` but the
final axis is 1 instead. Same as running
      `tf.cast(tf.linalg.norm(tensor, ord, axis, keepdims=True), tensor.dtype)`.
Raises:
ValueError: If `ord` or `axis` is invalid.
"""
with ops.name_scope(name, "normalize", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor)
norm = linalg_ops.norm(tensor, ord, axis, keepdims=True)
norm = math_ops.cast(norm, tensor.dtype)
normalized = tensor / norm
return normalized, norm
@tf_export(v1=["math.l2_normalize", "linalg.l2_normalize", "nn.l2_normalize"])
@dispatch.add_dispatch_support
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def l2_normalize(x, axis=None, epsilon=1e-12, name=None, dim=None):
"""Normalizes along dimension `axis` using an L2 norm.
For a 1-D tensor with `axis = 0`, computes
output = x / sqrt(max(sum(x**2), epsilon))
For `x` with more dimensions, independently normalizes each 1-D slice along
dimension `axis`.
Args:
x: A `Tensor`.
axis: Dimension along which to normalize. A scalar or a vector of
integers.
epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the
divisor if `norm < sqrt(epsilon)`.
name: A name for this operation (optional).
dim: Deprecated alias for axis.
Returns:
A `Tensor` with the same shape as `x`.
"""
axis = deprecated_argument_lookup("axis", axis, "dim", dim)
return l2_normalize_v2(x, axis, epsilon, name)
@tf_export("math.l2_normalize", "linalg.l2_normalize", "nn.l2_normalize", v1=[])
@dispatch.add_dispatch_support
def l2_normalize_v2(x, axis=None, epsilon=1e-12, name=None):
"""Normalizes along dimension `axis` using an L2 norm.
For a 1-D tensor with `axis = 0`, computes
output = x / sqrt(max(sum(x**2), epsilon))
For `x` with more dimensions, independently normalizes each 1-D slice along
dimension `axis`.
* 1-D tensor example:
>>> x = tf.constant([3.0, 4.0])
>>> tf.math.l2_normalize(x).numpy()
array([0.6, 0.8], dtype=float32)
* 2-D tensor example:
>>> x = tf.constant([[3.0], [4.0]])
>>> tf.math.l2_normalize(x, 0).numpy()
array([[0.6],
[0.8]], dtype=float32)
>>> x = tf.constant([[3.0], [4.0]])
>>> tf.math.l2_normalize(x, 1).numpy()
array([[1.],
[1.]], dtype=float32)
Args:
x: A `Tensor`.
axis: Dimension along which to normalize. A scalar or a vector of
integers.
epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the
divisor if `norm < sqrt(epsilon)`.
name: A name for this operation (optional).
Returns:
A `Tensor` with the same shape as `x`.
"""
with ops.name_scope(name, "l2_normalize", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex:
square_real = math_ops.square(math_ops.real(x))
square_imag = math_ops.square(math_ops.imag(x))
square_sum = math_ops.real(
math_ops.reduce_sum(square_real + square_imag, axis, keepdims=True))
x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
norm_real = math_ops.multiply(math_ops.real(x), x_inv_norm)
norm_imag = math_ops.multiply(math_ops.imag(x), x_inv_norm)
return math_ops.complex(norm_real, norm_imag, name=name)
square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keepdims=True)
x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
return math_ops.multiply(x, x_inv_norm, name=name)
def _count_nonzero(input_tensor, dtype=dtypes.int64):
"""Same as math_ops.count_nonzero.
The reduction is done in dtype, which can be faster for 32-bit dtypes.
Args:
input_tensor: numeric tensor
dtype: reduction dtype
Returns:
number of nonzero values with type dtype
"""
with ops.name_scope("count_nonzero", values=[input_tensor]):
zero = array_ops.zeros([], dtype=input_tensor.dtype)
nonzero_count = math_ops.reduce_sum(
math_ops.cast(
math_ops.not_equal(input_tensor, zero),
dtype=dtype), name="nonzero_count")
return nonzero_count
@tf_export("math.zero_fraction", "nn.zero_fraction")
@dispatch.add_dispatch_support
def zero_fraction(value, name=None):
"""Returns the fraction of zeros in `value`.
If `value` is empty, the result is `nan`.
This is useful in summaries to measure and report sparsity. For example,
```python
z = tf.nn.relu(...)
summ = tf.compat.v1.summary.scalar('sparsity', tf.nn.zero_fraction(z))
```
Args:
value: A tensor of numeric type.
name: A name for the operation (optional).
Returns:
The fraction of zeros in `value`, with type `float32`.
"""
with ops.name_scope(name, "zero_fraction", [value]):
value = ops.convert_to_tensor(value, name="value")
size = array_ops.size(value, out_type=dtypes.int64)
# If the count is small, we can save memory/CPU with an int32 reduction.
num_nonzero = control_flow_ops.cond(
size <= dtypes.int32.max,
# pylint: disable=g-long-lambda
true_fn=lambda: math_ops.cast(
_count_nonzero(value, dtype=dtypes.int32),
dtype=dtypes.int64),
false_fn=lambda: _count_nonzero(value, dtype=dtypes.int64))
with ops.name_scope("counts_to_fraction"):
num_zero = size - num_nonzero
num_zero_float32 = math_ops.cast(num_zero, dtype=dtypes.float32)
size_float32 = math_ops.cast(size, dtype=dtypes.float32)
zero_fraction_float32 = num_zero_float32 / size_float32
return array_ops.identity(zero_fraction_float32, "fraction")
# pylint: disable=redefined-builtin
@tf_export(v1=["nn.depthwise_conv2d"])
@dispatch.add_dispatch_support
def depthwise_conv2d(input,
filter,
strides,
padding,
rate=None,
name=None,
data_format=None,
dilations=None):
"""Depthwise 2-D convolution.
Given a 4D input tensor ('NHWC' or 'NCHW' data formats)
and a filter tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`
containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d`
applies a different filter to each input channel (expanding from 1 channel
to `channel_multiplier` channels for each), then concatenates the results
together. The output has `in_channels * channel_multiplier` channels.
In detail, with the default NHWC format,
output[b, i, j, k * channel_multiplier + q] = sum_{di, dj}
filter[di, dj, k, q] * input[b, strides[1] * i + rate[0] * di,
strides[2] * j + rate[1] * dj, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the
same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
If any value in `rate` is greater than 1, we perform atrous depthwise
convolution, in which case all values in the `strides` tensor must be equal
to 1.
Usage Example:
>>> x = np.array([
... [1., 2.],
... [3., 4.],
... [5., 6.]
... ], dtype=np.float32).reshape((1, 3, 2, 1))
>>> kernel = np.array([
... [1., 2.],
... [3., 4]
... ], dtype=np.float32).reshape((2, 1, 1, 2))
>>> tf.compat.v1.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
... padding='VALID').numpy()
array([[[[10., 14.],
[14., 20.]],
[[18., 26.],
[22., 32.]]]], dtype=float32)
>>> tf.compat.v1.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
... padding=[[0, 0], [1, 0], [1, 0], [0, 0]]
... ).numpy()
array([[[[ 0., 0.],
[ 3., 4.],
[ 6., 8.]],
[[ 0., 0.],
[10., 14.],
[14., 20.]],
[[ 0., 0.],
[18., 26.],
[22., 32.]]]], dtype=float32)
Args:
input: 4-D with shape according to `data_format`.
filter: 4-D with shape
`[filter_height, filter_width, in_channels, channel_multiplier]`.
strides: 1-D of size 4. The stride of the sliding window for each
dimension of `input`.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
      [pad_left, pad_right], [0, 0]]`. When explicit padding is used and
      data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    rate: 1-D of size 2. The dilation rate with which we sample input values
      across the `height` and `width` dimensions in atrous convolution. If it
      is greater than 1, then all values of strides must be 1.
name: A name for this operation (optional).
data_format: The data format for input. Either "NHWC" (default) or "NCHW".
dilations: Alias of rate.
Returns:
A 4-D `Tensor` with shape according to `data_format`. E.g., for
"NHWC" format, shape is
`[batch, out_height, out_width, in_channels * channel_multiplier].`
"""
rate = deprecated_argument_lookup("dilations", dilations, "rate", rate)
with ops.name_scope(name, "depthwise", [input, filter]) as name:
input = ops.convert_to_tensor(input, name="tensor_in")
filter = ops.convert_to_tensor(filter, name="filter_in")
if rate is None:
rate = [1, 1]
# Use depthwise_conv2d_native if executing on TPU.
if device_context.enclosing_tpu_context() is not None:
if data_format == "NCHW":
dilations = [1, 1, rate[0], rate[1]]
else:
dilations = [1, rate[0], rate[1], 1]
return nn_ops.depthwise_conv2d_native(
input=input,
filter=filter,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
def op(input_converted, _, padding):
return nn_ops.depthwise_conv2d_native(
input=input_converted,
filter=filter,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
return nn_ops.with_space_to_batch(
input=input,
filter_shape=array_ops.shape(filter),
dilation_rate=rate,
padding=padding,
data_format=data_format,
op=op)
@tf_export("nn.depthwise_conv2d", v1=[])
@dispatch.add_dispatch_support
def depthwise_conv2d_v2(input,
filter,
strides,
padding,
data_format=None,
dilations=None,
name=None):
"""Depthwise 2-D convolution.
Given a 4D input tensor ('NHWC' or 'NCHW' data formats)
and a filter tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`
containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d`
applies a different filter to each input channel (expanding from 1 channel
to `channel_multiplier` channels for each), then concatenates the results
together. The output has `in_channels * channel_multiplier` channels.
In detail, with the default NHWC format,
      output[b, i, j, k * channel_multiplier + q] = sum_{di, dj}
           filter[di, dj, k, q] * input[b, strides[1] * i + dilations[0] * di,
                                           strides[2] * j + dilations[1] * dj, k]
  Must have `strides[0] = strides[3] = 1`. For the most common case of the
  same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `dilations` is greater than 1, we perform atrous depthwise
  convolution, in which case all values in the `strides` tensor must be equal
  to 1.
Usage Example:
>>> x = np.array([
... [1., 2.],
... [3., 4.],
... [5., 6.]
... ], dtype=np.float32).reshape((1, 3, 2, 1))
>>> kernel = np.array([
... [1., 2.],
... [3., 4]
... ], dtype=np.float32).reshape((2, 1, 1, 2))
>>> tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
... padding='VALID').numpy()
array([[[[10., 14.],
[14., 20.]],
[[18., 26.],
[22., 32.]]]], dtype=float32)
>>> tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],
... padding=[[0, 0], [1, 0], [1, 0], [0, 0]]).numpy()
array([[[[ 0., 0.],
[ 3., 4.],
[ 6., 8.]],
[[ 0., 0.],
[10., 14.],
[14., 20.]],
[[ 0., 0.],
[18., 26.],
[22., 32.]]]], dtype=float32)
Args:
input: 4-D with shape according to `data_format`.
filter: 4-D with shape
`[filter_height, filter_width, in_channels, channel_multiplier]`.
strides: 1-D of size 4. The stride of the sliding window for each
dimension of `input`.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
      [pad_left, pad_right], [0, 0]]`. When explicit padding is used and
      data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: The data format for input. Either "NHWC" (default) or "NCHW".
    dilations: 1-D of size 2. The dilation rate with which we sample input
      values across the `height` and `width` dimensions in atrous convolution.
      If it is greater than 1, then all values of strides must be 1.
name: A name for this operation (optional).
Returns:
A 4-D `Tensor` with shape according to `data_format`. E.g., for
"NHWC" format, shape is
`[batch, out_height, out_width, in_channels * channel_multiplier].`
"""
return depthwise_conv2d(input=input,
filter=filter,
strides=strides,
padding=padding,
rate=dilations,
name=name,
data_format=data_format)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin,line-too-long
@tf_export(v1=["nn.separable_conv2d"])
@dispatch.add_dispatch_support
def separable_conv2d(input,
depthwise_filter,
pointwise_filter,
strides,
padding,
rate=None,
name=None,
data_format=None,
dilations=None):
"""2-D convolution with separable filters.
Performs a depthwise convolution that acts separately on channels followed by
a pointwise convolution that mixes channels. Note that this is separability
between dimensions `[1, 2]` and `3`, not spatial separability between
dimensions `1` and `2`.
In detail, with the default NHWC format,
output[b, i, j, k] = sum_{di, dj, q, r}
input[b, strides[1] * i + di, strides[2] * j + dj, q] *
depthwise_filter[di, dj, q, r] *
pointwise_filter[0, 0, q * channel_multiplier + r, k]
`strides` controls the strides for the depthwise convolution only, since
the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have
`strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
If any value in `rate` is greater than 1, we perform atrous depthwise
convolution, in which case all values in the `strides` tensor must be equal
to 1.
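  Usage Example (a minimal shape-only sketch; the tensors are random, so
  only the output shape is meaningful):
  >>> x = tf.random.normal([1, 8, 8, 3])
  >>> depthwise_filter = tf.random.normal([3, 3, 3, 2])
  >>> pointwise_filter = tf.random.normal([1, 1, 6, 4])
  >>> tf.compat.v1.nn.separable_conv2d(x, depthwise_filter, pointwise_filter,
  ...     strides=[1, 1, 1, 1], padding='SAME').shape
  TensorShape([1, 8, 8, 4])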
Args:
input: 4-D `Tensor` with shape according to `data_format`.
depthwise_filter: 4-D `Tensor` with shape
`[filter_height, filter_width, in_channels, channel_multiplier]`.
Contains `in_channels` convolutional filters of depth 1.
pointwise_filter: 4-D `Tensor` with shape
`[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise
filter to mix channels after `depthwise_filter` has convolved spatially.
strides: 1-D of size 4. The strides for the depthwise convolution for
each dimension of `input`.
padding: Controls how to pad the image before applying the depthwise
convolution. Can be the string `"SAME"` or `"VALID"` indicating the type
of padding algorithm to use, or a Python list indicating the explicit
paddings at the start and end of each dimension. When explicit padding is
used and data_format is `"NHWC"`, this should be in the form `[[0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit
      padding is used and data_format is `"NCHW"`, this should be in the form
      `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`.
    rate: 1-D of size 2. The dilation rate with which we sample input values
      across the `height` and `width` dimensions in atrous convolution. If it
      is greater than 1, then all values of strides must be 1.
name: A name for this operation (optional).
data_format: The data format for input. Either "NHWC" (default) or "NCHW".
dilations: Alias of rate.
Returns:
    A 4-D `Tensor` with shape according to `data_format`. For
      example, with data_format="NHWC", shape is `[batch, out_height,
      out_width, out_channels]`.
"""
rate = deprecated_argument_lookup("dilations", dilations, "rate", rate)
with ops.name_scope(name, "separable_conv2d",
[input, depthwise_filter, pointwise_filter]) as name:
input = ops.convert_to_tensor(input, name="tensor_in")
depthwise_filter = ops.convert_to_tensor(
depthwise_filter, name="depthwise_filter")
pointwise_filter = ops.convert_to_tensor(
pointwise_filter, name="pointwise_filter")
pointwise_filter_shape = pointwise_filter.get_shape().with_rank(4)
pointwise_filter_shape.dims[0].assert_is_compatible_with(1)
pointwise_filter_shape.dims[1].assert_is_compatible_with(1)
if rate is None:
rate = [1, 1]
# The layout of the ops in the graph are expected to be as follows:
# depthwise_conv2d // Conv2D op corresponding to native depthwise conv.
# separable_conv2d // Conv2D op corresponding to the pointwise conv.
def op(input_converted, _, padding):
return nn_ops.depthwise_conv2d_native(
input=input_converted,
filter=depthwise_filter,
strides=strides,
padding=padding,
data_format=data_format,
name="depthwise")
depthwise = nn_ops.with_space_to_batch(
input=input,
filter_shape=array_ops.shape(depthwise_filter),
dilation_rate=rate,
padding=padding,
data_format=data_format,
op=op)
return nn_ops.conv2d(
depthwise,
pointwise_filter, [1, 1, 1, 1],
padding="VALID",
data_format=data_format,
name=name)
@tf_export("nn.separable_conv2d", v1=[])
@dispatch.add_dispatch_support
def separable_conv2d_v2(
input,
depthwise_filter,
pointwise_filter,
strides,
padding,
data_format=None,
dilations=None,
name=None,
):
"""2-D convolution with separable filters.
Performs a depthwise convolution that acts separately on channels followed by
a pointwise convolution that mixes channels. Note that this is separability
between dimensions `[1, 2]` and `3`, not spatial separability between
dimensions `1` and `2`.
In detail, with the default NHWC format,
output[b, i, j, k] = sum_{di, dj, q, r}
input[b, strides[1] * i + di, strides[2] * j + dj, q] *
depthwise_filter[di, dj, q, r] *
pointwise_filter[0, 0, q * channel_multiplier + r, k]
`strides` controls the strides for the depthwise convolution only, since
the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have
`strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `dilations` is greater than 1, we perform atrous depthwise
  convolution, in which case all values in the `strides` tensor must be equal
  to 1.
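  Usage Example (a minimal shape-only sketch with atrous depthwise
  convolution; the tensors are random, so only the output shape is
  meaningful):
  >>> x = tf.random.normal([1, 8, 8, 3])
  >>> depthwise_filter = tf.random.normal([3, 3, 3, 2])
  >>> pointwise_filter = tf.random.normal([1, 1, 6, 4])
  >>> tf.nn.separable_conv2d(x, depthwise_filter, pointwise_filter,
  ...     strides=[1, 1, 1, 1], padding='VALID', dilations=[2, 2]).shape
  TensorShape([1, 4, 4, 4])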
Args:
input: 4-D `Tensor` with shape according to `data_format`.
depthwise_filter: 4-D `Tensor` with shape `[filter_height, filter_width,
in_channels, channel_multiplier]`. Contains `in_channels` convolutional
filters of depth 1.
pointwise_filter: 4-D `Tensor` with shape `[1, 1, channel_multiplier *
in_channels, out_channels]`. Pointwise filter to mix channels after
`depthwise_filter` has convolved spatially.
strides: 1-D of size 4. The strides for the depthwise convolution for each
dimension of `input`.
padding: Controls how to pad the image before applying the depthwise
convolution. Can be the string `"SAME"` or `"VALID"` indicating the type
of padding algorithm to use, or a Python list indicating the explicit
paddings at the start and end of each dimension. When explicit padding is
used and data_format is `"NHWC"`, this should be in the form `[[0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit
      padding is used and data_format is `"NCHW"`, this should be in the form
      `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: The data format for input. Either "NHWC" (default) or "NCHW".
    dilations: 1-D of size 2. The dilation rate with which we sample input
      values across the `height` and `width` dimensions in atrous convolution.
      If it is greater than 1, then all values of strides must be 1.
name: A name for this operation (optional).
Returns:
    A 4-D `Tensor` with shape according to `data_format`. For
      example, with data_format="NHWC", shape is `[batch, out_height,
      out_width, out_channels]`.
"""
return separable_conv2d(
input,
depthwise_filter,
pointwise_filter,
strides,
padding,
rate=dilations,
name=name,
data_format=data_format)
# pylint: enable=redefined-builtin,line-too-long
@tf_export(v1=["nn.sufficient_statistics"])
@dispatch.add_dispatch_support
def sufficient_statistics(x, axes, shift=None, keep_dims=None, name=None,
keepdims=None):
"""Calculate the sufficient statistics for the mean and variance of `x`.
These sufficient statistics are computed using the one pass algorithm on
an input that's optionally shifted. See:
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data
For example:
>>> t = [[1, 2, 3], [4, 5, 6]]
  >>> tf.compat.v1.nn.sufficient_statistics(t, [1])
(<tf.Tensor: shape=(), dtype=int32, numpy=3>, <tf.Tensor: shape=(2,),
dtype=int32, numpy=array([ 6, 15], dtype=int32)>, <tf.Tensor: shape=(2,),
dtype=int32, numpy=array([14, 77], dtype=int32)>, None)
  >>> tf.compat.v1.nn.sufficient_statistics(t, [-1])
(<tf.Tensor: shape=(), dtype=int32, numpy=3>, <tf.Tensor: shape=(2,),
dtype=int32, numpy=array([ 6, 15], dtype=int32)>, <tf.Tensor: shape=(2,),
dtype=int32, numpy=array([14, 77], dtype=int32)>, None)
Args:
x: A `Tensor`.
axes: Array of ints. Axes along which to compute mean and variance. As in
Python, the axes can also be negative numbers. A negative axis is
interpreted as counting from the end of the rank, i.e., axis +
rank(values)-th dimension.
shift: A `Tensor` containing the value by which to shift the data for
numerical stability, or `None` if no shift is to be performed. A shift
close to the true mean provides the most numerically stable results.
keep_dims: produce statistics with the same dimensionality as the input.
name: Name used to scope the operations that compute the sufficient stats.
keepdims: Alias for keep_dims.
Returns:
Four `Tensor` objects of the same type as `x`:
* the count (number of elements to average over).
* the (possibly shifted) sum of the elements in the array.
* the (possibly shifted) sum of squares of the elements in the array.
* the shift by which the mean must be corrected or None if `shift` is None.
"""
axes = list(set(axes))
keep_dims = deprecated_argument_lookup(
"keepdims", keepdims, "keep_dims", keep_dims)
if keep_dims is None:
keep_dims = False
with ops.name_scope(name, "sufficient_statistics", [x, shift]):
x = ops.convert_to_tensor(x, name="x")
x_shape = x.get_shape()
if x_shape.rank is not None and all(
x_shape.dims[d].value is not None for d in axes):
counts = 1
for d in axes:
counts *= x_shape.dims[d].value
counts = constant_op.constant(counts, dtype=x.dtype)
else: # shape needs to be inferred at runtime.
# Normalize axes to be positive. Required for gather.
rank = array_ops.rank(x)
positive_axes = [axis + rank if axis < 0 else axis for axis in axes]
x_dims = array_ops.gather(
math_ops.cast(array_ops.shape(x), x.dtype), positive_axes)
counts = math_ops.reduce_prod(x_dims, name="count")
if shift is not None:
shift = ops.convert_to_tensor(shift, name="shift")
m_ss = math_ops.subtract(x, shift)
v_ss = math_ops.squared_difference(x, shift)
else: # no shift.
m_ss = x
v_ss = math_ops.square(x)
m_ss = math_ops.reduce_sum(m_ss, axes, keepdims=keep_dims, name="mean_ss")
v_ss = math_ops.reduce_sum(v_ss, axes, keepdims=keep_dims, name="var_ss")
return counts, m_ss, v_ss, shift
@tf_export("nn.sufficient_statistics", v1=[])
@dispatch.add_dispatch_support
def sufficient_statistics_v2(x, axes, shift=None, keepdims=False, name=None):
"""Calculate the sufficient statistics for the mean and variance of `x`.
These sufficient statistics are computed using the one pass algorithm on
an input that's optionally shifted. See:
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data
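  For example (a minimal sketch; `keepdims=True` keeps the reduced axis):
  >>> t = [[1., 2., 3.], [4., 5., 6.]]
  >>> counts, mean_ss, var_ss, shift = tf.nn.sufficient_statistics(
  ...     t, [1], keepdims=True)
  >>> float(counts)
  3.0
  >>> mean_ss.numpy()
  array([[ 6.],
         [15.]], dtype=float32)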
Args:
x: A `Tensor`.
axes: Array of ints. Axes along which to compute mean and variance.
shift: A `Tensor` containing the value by which to shift the data for
numerical stability, or `None` if no shift is to be performed. A shift
close to the true mean provides the most numerically stable results.
keepdims: produce statistics with the same dimensionality as the input.
name: Name used to scope the operations that compute the sufficient stats.
Returns:
Four `Tensor` objects of the same type as `x`:
* the count (number of elements to average over).
* the (possibly shifted) sum of the elements in the array.
* the (possibly shifted) sum of squares of the elements in the array.
* the shift by which the mean must be corrected or None if `shift` is None.
"""
return sufficient_statistics(
x=x, axes=axes, shift=shift, keep_dims=keepdims, name=name)
@tf_export("nn.normalize_moments")
@dispatch.add_dispatch_support
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
"""Calculate the mean and variance of based on the sufficient statistics.
Args:
counts: A `Tensor` containing the total count of the data (one value).
mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly
shifted) sum of the elements to average over.
variance_ss: A `Tensor` containing the variance sufficient statistics: the
(possibly shifted) squared sum of the data to compute the variance over.
shift: A `Tensor` containing the value by which the data is shifted for
numerical stability, or `None` if no shift was performed.
name: Name used to scope the operations that compute the moments.
Returns:
Two `Tensor` objects: `mean` and `variance`.
"""
with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
divisor = math_ops.reciprocal(counts, name="divisor")
if shift is not None:
shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
mean = math_ops.add(shifted_mean, shift, name="mean")
else: # no shift.
shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
mean = shifted_mean
variance = math_ops.subtract(
math_ops.multiply(variance_ss, divisor),
math_ops.square(shifted_mean),
name="variance")
return (mean, variance)
@tf_export(v1=["nn.moments"])
@dispatch.add_dispatch_support
def moments(
x,
axes,
shift=None, # pylint: disable=unused-argument
name=None,
keep_dims=None,
keepdims=None):
"""Calculate the mean and variance of `x`.
The mean and variance are calculated by aggregating the contents of `x`
across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean
and variance of a vector.
Note: shift is currently not used; the true mean is computed and used.
When using these moments for batch normalization (see
`tf.nn.batch_normalization`):
* for so-called "global normalization", used with convolutional filters with
shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
* for simple batch normalization pass `axes=[0]` (batch only).
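  For example (a minimal sketch computing per-column moments):
  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> mean, variance = tf.compat.v1.nn.moments(x, axes=[0])
  >>> mean.numpy()
  array([2., 3.], dtype=float32)
  >>> variance.numpy()
  array([1., 1.], dtype=float32)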
Args:
x: A `Tensor`.
axes: Array of ints. Axes along which to compute mean and
variance.
    shift: Not used in the current implementation.
    name: Name used to scope the operations that compute the moments.
    keep_dims: produce moments with the same dimensionality as the input.
    keepdims: Alias of keep_dims.
Returns:
Two `Tensor` objects: `mean` and `variance`.
"""
keep_dims = deprecated_argument_lookup(
"keepdims", keepdims, "keep_dims", keep_dims)
if keep_dims is None:
keep_dims = False
with ops.name_scope(name, "moments", [x, axes]):
# The dynamic range of fp16 is too limited to support the collection of
# sufficient statistics. As a workaround we simply perform the operations
# on 32-bit floats before converting the mean and variance back to fp16
y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x
# Compute true mean while keeping the dims for proper broadcasting.
mean = math_ops.reduce_mean(y, axes, keepdims=True, name="mean")
# sample variance, not unbiased variance
# Note: stop_gradient does not change the gradient that gets
# backpropagated to the mean from the variance calculation,
# because that gradient is zero
variance = math_ops.reduce_mean(
math_ops.squared_difference(y, array_ops.stop_gradient(mean)),
axes,
keepdims=True,
name="variance")
if not keep_dims:
mean = array_ops.squeeze(mean, axes)
variance = array_ops.squeeze(variance, axes)
if x.dtype == dtypes.float16:
return (math_ops.cast(mean, dtypes.float16),
math_ops.cast(variance, dtypes.float16))
else:
return (mean, variance)
@tf_export("nn.moments", v1=[])
@dispatch.add_dispatch_support
def moments_v2(
x,
axes,
shift=None,
keepdims=False,
name=None):
"""Calculates the mean and variance of `x`.
The mean and variance are calculated by aggregating the contents of `x`
across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean
and variance of a vector.
Note: shift is currently not used; the true mean is computed and used.
When using these moments for batch normalization (see
`tf.nn.batch_normalization`):
* for so-called "global normalization", used with convolutional filters with
shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
* for simple batch normalization pass `axes=[0]` (batch only).
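  For example (a minimal sketch; `keepdims=True` keeps the reduced axes):
  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> mean, variance = tf.nn.moments(x, axes=[0], keepdims=True)
  >>> mean.numpy()
  array([[2., 3.]], dtype=float32)
  >>> variance.numpy()
  array([[1., 1.]], dtype=float32)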
Args:
x: A `Tensor`.
axes: Array of ints. Axes along which to compute mean and
variance.
shift: Not used in the current implementation.
keepdims: produce moments with the same dimensionality as the input.
name: Name used to scope the operations that compute the moments.
Returns:
Two `Tensor` objects: `mean` and `variance`.
"""
return moments(x=x, axes=axes, shift=shift, name=name, keep_dims=keepdims)
@tf_export(v1=["nn.weighted_moments"])
@dispatch.add_dispatch_support
def weighted_moments(x, axes, frequency_weights, name=None, keep_dims=None,
keepdims=None):
"""Returns the frequency-weighted mean and variance of `x`.
Args:
x: A tensor.
axes: 1-d tensor of int32 values; these are the axes along which
to compute mean and variance.
frequency_weights: A tensor of positive weights which can be
broadcast with x.
name: Name used to scope the operation.
keep_dims: Produce moments with the same dimensionality as the input.
keepdims: Alias of keep_dims.
Returns:
Two tensors: `weighted_mean` and `weighted_variance`.
"""
keep_dims = deprecated_argument_lookup(
"keepdims", keepdims, "keep_dims", keep_dims)
if keep_dims is None:
keep_dims = False
with ops.name_scope(name, "weighted_moments", [x, frequency_weights, axes]):
x = ops.convert_to_tensor(x, name="x")
frequency_weights = ops.convert_to_tensor(
frequency_weights, name="frequency_weights")
# Unlike moments(), this just uses a simpler two-pass method.
# See comment in moments() WRT precision; it applies here too.
needs_cast = x.dtype == dtypes.float16
if needs_cast:
x = math_ops.cast(x, dtypes.float32)
if frequency_weights.dtype != x.dtype:
frequency_weights = math_ops.cast(frequency_weights, x.dtype)
# Note that we use keep_dims=True for our reductions regardless of the arg;
# this is so that the results remain broadcast-compatible with the inputs.
weighted_input_sum = math_ops.reduce_sum(
frequency_weights * x, axes, name="weighted_input_sum", keepdims=True)
# The shape of the weights isn't necessarily the same as x's
# shape, just broadcast-compatible with it -- so this expression
# performs broadcasting to give a per-item weight, with the same
# shape as (frequency_weights * x). This avoids having to reason
# through all the broadcast logic to compute a correct
# sum_of_weights.
broadcasted_weights = frequency_weights + array_ops.zeros_like(x)
sum_of_weights = math_ops.reduce_sum(
broadcasted_weights, axes, name="sum_of_weights", keepdims=True)
divisor = math_ops.reciprocal(sum_of_weights, name="inv_weight_sum")
weighted_mean = math_ops.multiply(weighted_input_sum, divisor)
# Have the weighted mean; now on to variance:
weighted_distsq = math_ops.reduce_sum(
frequency_weights * math_ops.squared_difference(x, weighted_mean),
axes,
name="weighted_distsq",
keepdims=True)
weighted_variance = math_ops.multiply(weighted_distsq, divisor)
if not keep_dims:
weighted_mean = array_ops.squeeze(weighted_mean, axis=axes)
weighted_variance = array_ops.squeeze(
weighted_variance, axis=axes)
if needs_cast:
weighted_mean = math_ops.cast(weighted_mean, dtypes.float16)
weighted_variance = math_ops.cast(weighted_variance, dtypes.float16)
return weighted_mean, weighted_variance
@tf_export("nn.weighted_moments", v1=[])
@dispatch.add_dispatch_support
def weighted_moments_v2(x, axes, frequency_weights, keepdims=False, name=None):
"""Returns the frequency-weighted mean and variance of `x`.
Args:
x: A tensor.
axes: 1-d tensor of int32 values; these are the axes along which
to compute mean and variance.
frequency_weights: A tensor of positive weights which can be
broadcast with x.
keepdims: Produce moments with the same dimensionality as the input.
name: Name used to scope the operation.
Returns:
Two tensors: `weighted_mean` and `weighted_variance`.
"""
return weighted_moments(
x=x,
axes=axes,
frequency_weights=frequency_weights,
name=name,
keep_dims=keepdims)
@tf_export("nn.batch_normalization")
@dispatch.add_dispatch_support
def batch_normalization(x,
mean,
variance,
offset,
scale,
variance_epsilon,
name=None):
r"""Batch normalization.
Normalizes a tensor by `mean` and `variance`, and applies (optionally) a
`scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\):
\\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\)
`mean`, `variance`, `offset` and `scale` are all expected to be of one of two
shapes:
* In all generality, they can have the same number of dimensions as the
input `x`, with identical sizes as `x` for the dimensions that are not
normalized over (the 'depth' dimension(s)), and dimension 1 for the
others which are being normalized over.
`mean` and `variance` in this case would typically be the outputs of
`tf.nn.moments(..., keepdims=True)` during training, or running averages
thereof during inference.
* In the common case where the 'depth' dimension is the last dimension in
the input tensor `x`, they may be one dimensional tensors of the same
size as the 'depth' dimension.
This is the case for example for the common `[batch, depth]` layout of
fully-connected layers, and `[batch, height, width, depth]` for
convolutions.
`mean` and `variance` in this case would typically be the outputs of
`tf.nn.moments(..., keepdims=False)` during training, or running averages
thereof during inference.
See equation 11 in Algorithm 2 of source:
[Batch Normalization: Accelerating Deep Network Training by
Reducing Internal Covariate Shift; S. Ioffe, C. Szegedy]
(http://arxiv.org/abs/1502.03167).
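  For example, standardizing each column of a batch (a minimal sketch;
  `variance_epsilon=0.` is used only so the output is exact, real code
  should pass a small positive value such as `1e-5`):
  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> mean, variance = tf.nn.moments(x, axes=[0])
  >>> tf.nn.batch_normalization(x, mean, variance, offset=None, scale=None,
  ...     variance_epsilon=0.).numpy()
  array([[-1., -1.],
         [ 1.,  1.]], dtype=float32)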
Args:
x: Input `Tensor` of arbitrary dimensionality.
mean: A mean `Tensor`.
variance: A variance `Tensor`.
offset: An offset `Tensor`, often denoted \\(\beta\\) in equations, or
None. If present, will be added to the normalized tensor.
scale: A scale `Tensor`, often denoted \\(\gamma\\) in equations, or
`None`. If present, the scale is applied to the normalized tensor.
variance_epsilon: A small float number to avoid dividing by 0.
name: A name for this operation (optional).
Returns:
the normalized, scaled, offset tensor.
References:
Batch Normalization - Accelerating Deep Network Training by Reducing
Internal Covariate Shift:
[Ioffe et al., 2015](http://arxiv.org/abs/1502.03167)
([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
"""
with ops.name_scope(name, "batchnorm", [x, mean, variance, scale, offset]):
inv = math_ops.rsqrt(variance + variance_epsilon)
if scale is not None:
inv *= scale
# Note: tensorflow/contrib/quantize/python/fold_batch_norms.py depends on
# the precise order of ops that are generated by the expression below.
return x * math_ops.cast(inv, x.dtype) + math_ops.cast(
offset - mean * inv if offset is not None else -mean * inv, x.dtype)
@tf_export(v1=["nn.fused_batch_norm"])
@dispatch.add_dispatch_support
def fused_batch_norm(
x,
scale,
offset, # pylint: disable=invalid-name
mean=None,
variance=None,
epsilon=0.001,
data_format="NHWC",
is_training=True,
name=None,
exponential_avg_factor=1.0):
r"""Batch normalization.
See Source: [Batch Normalization: Accelerating Deep Network Training by
Reducing Internal Covariate Shift; S. Ioffe, C. Szegedy]
(http://arxiv.org/abs/1502.03167).
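  For example, a minimal training-mode sketch (the input is random, so only
  the output shapes are meaningful):
  >>> x = tf.random.normal([8, 4, 4, 3])
  >>> scale = tf.ones([3])
  >>> offset = tf.zeros([3])
  >>> y, batch_mean, batch_var = tf.compat.v1.nn.fused_batch_norm(
  ...     x, scale, offset)
  >>> y.shape, batch_mean.shape, batch_var.shape
  (TensorShape([8, 4, 4, 3]), TensorShape([3]), TensorShape([3]))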
Args:
x: Input `Tensor` of 4 or 5 dimensions.
scale: A `Tensor` of 1 dimension for scaling.
offset: A `Tensor` of 1 dimension for bias.
mean: A `Tensor` of 1 dimension for population mean. The shape and meaning
of this argument depends on the value of is_training and
exponential_avg_factor as follows:
is_training==False (inference):
Mean must be a `Tensor` of the same shape as scale containing the
estimated population mean computed during training.
is_training==True and exponential_avg_factor == 1.0:
Mean must be None.
is_training==True and exponential_avg_factor != 1.0:
Mean must be a `Tensor` of the same shape as scale containing the
exponential running mean.
variance: A `Tensor` of 1 dimension for population variance. The shape and
meaning of this argument depends on the value of is_training and
exponential_avg_factor as follows:
is_training==False (inference):
Variance must be a `Tensor` of the same shape as scale containing
the estimated population variance computed during training.
is_training==True and exponential_avg_factor == 1.0:
Variance must be None.
is_training==True and exponential_avg_factor != 1.0:
Variance must be a `Tensor` of the same shape as scale containing
the exponential running variance.
epsilon: A small float number added to the variance of x.
    data_format: The data format for x. Supports "NHWC" (default) or "NCHW"
      for 4D tensors and "NDHWC" or "NCDHW" for 5D tensors.
is_training: A bool value to specify if the operation is used for
training or inference.
name: A name for this operation (optional).
exponential_avg_factor: A float number (usually between 0 and 1) used
for controlling the decay of the running
population average of mean and variance.
If set to 1.0, the current batch average is
returned.
  Returns:
    y: A 4D or 5D Tensor for the normalized, scaled, offset x.
    running_mean: A 1D Tensor for the exponential running mean of x.
                  The output value is (1 - exponential_avg_factor) * mean +
                  exponential_avg_factor * batch_mean, where batch_mean
                  is the mean of the current batch in x.
    running_var: A 1D Tensor for the exponential running variance.
                 The output value is (1 - exponential_avg_factor) * variance +
                 exponential_avg_factor * batch_variance, where batch_variance
                 is the variance of the current batch in x.
References:
Batch Normalization - Accelerating Deep Network Training by Reducing
Internal Covariate Shift:
[Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html)
([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
"""
if (not is_training or exponential_avg_factor != 1.0) and (
(mean is None) or (variance is None)):
raise ValueError("Both 'mean' and 'variance' must be a 1D tensor when "
"is_training is False or "
"exponential_avg_factor != 1.0.")
x = ops.convert_to_tensor(x, name="input")
scale = ops.convert_to_tensor(scale, name="scale")
offset = ops.convert_to_tensor(offset, name="offset")
if mean is None:
mean = constant_op.constant([])
if variance is None:
variance = constant_op.constant([])
  # Set a minimum epsilon of 1.001e-5, which is required by cuDNN to
  # prevent exceptions (see cudnn.h).
min_epsilon = 1.001e-5
epsilon = epsilon if epsilon > min_epsilon else min_epsilon
y, running_mean, running_var, _, _, _ = gen_nn_ops.fused_batch_norm_v3(
x,
scale,
offset,
mean,
variance,
epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format,
is_training=is_training,
name=name)
return y, running_mean, running_var
@tf_export(v1=["nn.batch_norm_with_global_normalization"])
@dispatch.add_dispatch_support
def batch_norm_with_global_normalization(t=None,
m=None,
v=None,
beta=None,
gamma=None,
variance_epsilon=None,
scale_after_normalization=None,
name=None,
input=None, # pylint: disable=redefined-builtin
mean=None,
variance=None):
"""Batch normalization.
This op is deprecated. See `tf.nn.batch_normalization`.
Args:
t: A 4D input Tensor.
m: A 1D mean Tensor with size matching the last dimension of t.
This is the first output from tf.nn.moments,
or a saved moving average thereof.
v: A 1D variance Tensor with size matching the last dimension of t.
This is the second output from tf.nn.moments,
or a saved moving average thereof.
beta: A 1D beta Tensor with size matching the last dimension of t.
An offset to be added to the normalized tensor.
gamma: A 1D gamma Tensor with size matching the last dimension of t.
If "scale_after_normalization" is true, this tensor will be multiplied
with the normalized tensor.
variance_epsilon: A small float number to avoid dividing by 0.
    scale_after_normalization: A bool indicating whether the resulting tensor
      needs to be multiplied with gamma.
name: A name for this operation (optional).
input: Alias for t.
mean: Alias for m.
variance: Alias for v.
Returns:
A batch-normalized `t`.
References:
Batch Normalization - Accelerating Deep Network Training by Reducing
Internal Covariate Shift:
[Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html)
([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
"""
t = deprecated_argument_lookup("input", input, "t", t)
m = deprecated_argument_lookup("mean", mean, "m", m)
v = deprecated_argument_lookup("variance", variance, "v", v)
return batch_normalization(t, m, v, beta, gamma if scale_after_normalization
else None, variance_epsilon, name)
# pylint: disable=redefined-builtin,line-too-long
@tf_export("nn.batch_norm_with_global_normalization", v1=[])
@dispatch.add_dispatch_support
def batch_norm_with_global_normalization_v2(input,
mean,
variance,
beta,
gamma,
variance_epsilon,
scale_after_normalization,
name=None):
"""Batch normalization.
This op is deprecated. See `tf.nn.batch_normalization`.
Args:
input: A 4D input Tensor.
    mean: A 1D mean Tensor with size matching the last dimension of input.
      This is the first output from tf.nn.moments,
      or a saved moving average thereof.
    variance: A 1D variance Tensor with size matching the last dimension of
      input. This is the second output from tf.nn.moments,
      or a saved moving average thereof.
    beta: A 1D beta Tensor with size matching the last dimension of input.
      An offset to be added to the normalized tensor.
    gamma: A 1D gamma Tensor with size matching the last dimension of input.
      If "scale_after_normalization" is true, this tensor will be multiplied
      with the normalized tensor.
    variance_epsilon: A small float number to avoid dividing by 0.
    scale_after_normalization: A bool indicating whether the resulting tensor
      needs to be multiplied with gamma.
name: A name for this operation (optional).
Returns:
    A batch-normalized `input`.
References:
Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift:
[Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html)
([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
"""
return batch_norm_with_global_normalization(t=input,
m=mean,
v=variance,
beta=beta,
gamma=gamma,
variance_epsilon=variance_epsilon,
scale_after_normalization=scale_after_normalization,
name=name)
# pylint: enable=redefined-builtin,line-too-long
def _sum_rows(x):
"""Returns a vector summing up each row of the matrix x."""
# _sum_rows(x) is equivalent to math_ops.reduce_sum(x, 1) when x is
# a matrix. The gradient of _sum_rows(x) is more efficient than
# reduce_sum(x, 1)'s gradient in today's implementation. Therefore,
# we use _sum_rows(x) in the nce_loss() computation since the loss
# is mostly used for training.
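  # For example, _sum_rows([[1., 2.], [3., 4.]]) evaluates to [3., 7.].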
cols = array_ops.shape(x)[1]
ones_shape = array_ops.stack([cols, 1])
ones = array_ops.ones(ones_shape, x.dtype)
return array_ops.reshape(math_ops.matmul(x, ones), [-1])
def _compute_sampled_logits(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=1,
sampled_values=None,
subtract_log_q=True,
remove_accidental_hits=False,
partition_strategy="mod",
name=None,
seed=None):
"""Helper function for nce_loss and sampled_softmax_loss functions.
Computes sampled output training logits and labels suitable for implementing
e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see
sampled_softmax_loss).
Note: In the case where num_true > 1, we assign to each target class
the target probability 1 / num_true so that the target probabilities
sum to 1 per-example.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
`[num_classes, dim]`. The (possibly-partitioned) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The (possibly-partitioned)
class biases.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes. Note that this format differs from
the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
    subtract_log_q: A `bool`. Whether to subtract the log expected count of
      the labels in the sample to get the logits of the true labels.
      Default is True. Turn off for Negative Sampling.
    remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
      where a sampled class equals one of the target classes. Default is
      False.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
    seed: random seed for candidate sampling. Defaults to None, which doesn't
      set the op-level random seed for candidate sampling.
Returns:
out_logits: `Tensor` object with shape
`[batch_size, num_true + num_sampled]`, for passing to either
`nn.sigmoid_cross_entropy_with_logits` (NCE) or
`nn.softmax_cross_entropy_with_logits` (sampled softmax).
out_labels: A Tensor object with the same shape as `out_logits`.
"""
if isinstance(weights, variables.PartitionedVariable):
weights = list(weights)
if not isinstance(weights, list):
weights = [weights]
with ops.name_scope(name, "compute_sampled_logits",
weights + [biases, inputs, labels]):
if labels.dtype != dtypes.int64:
labels = math_ops.cast(labels, dtypes.int64)
labels_flat = array_ops.reshape(labels, [-1])
# Sample the negative labels.
# sampled shape: [num_sampled] tensor
# true_expected_count shape = [batch_size, 1] tensor
# sampled_expected_count shape = [num_sampled] tensor
if sampled_values is None:
sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(
true_classes=labels,
num_true=num_true,
num_sampled=num_sampled,
unique=True,
range_max=num_classes,
seed=seed)
# NOTE: pylint cannot tell that 'sampled_values' is a sequence
# pylint: disable=unpacking-non-sequence
sampled, true_expected_count, sampled_expected_count = (
array_ops.stop_gradient(s) for s in sampled_values)
# pylint: enable=unpacking-non-sequence
sampled = math_ops.cast(sampled, dtypes.int64)
# labels_flat is a [batch_size * num_true] tensor
# sampled is a [num_sampled] int tensor
all_ids = array_ops.concat([labels_flat, sampled], 0)
# Retrieve the true weights and the logits of the sampled weights.
# weights shape is [num_classes, dim]
all_w = embedding_ops.embedding_lookup(
weights, all_ids, partition_strategy=partition_strategy)
if all_w.dtype != inputs.dtype:
all_w = math_ops.cast(all_w, inputs.dtype)
# true_w shape is [batch_size * num_true, dim]
true_w = array_ops.slice(all_w, [0, 0],
array_ops.stack(
[array_ops.shape(labels_flat)[0], -1]))
sampled_w = array_ops.slice(
all_w, array_ops.stack([array_ops.shape(labels_flat)[0], 0]), [-1, -1])
# inputs has shape [batch_size, dim]
# sampled_w has shape [num_sampled, dim]
# Apply X*W', which yields [batch_size, num_sampled]
sampled_logits = math_ops.matmul(inputs, sampled_w, transpose_b=True)
# Retrieve the true and sampled biases, compute the true logits, and
# add the biases to the true and sampled logits.
all_b = embedding_ops.embedding_lookup(
biases, all_ids, partition_strategy=partition_strategy)
if all_b.dtype != inputs.dtype:
all_b = math_ops.cast(all_b, inputs.dtype)
# true_b is a [batch_size * num_true] tensor
# sampled_b is a [num_sampled] float tensor
true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat))
sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [-1])
# inputs shape is [batch_size, dim]
# true_w shape is [batch_size * num_true, dim]
# row_wise_dots is [batch_size, num_true, dim]
dim = array_ops.shape(true_w)[1:2]
new_true_w_shape = array_ops.concat([[-1, num_true], dim], 0)
row_wise_dots = math_ops.multiply(
array_ops.expand_dims(inputs, 1),
array_ops.reshape(true_w, new_true_w_shape))
# We want the row-wise dot plus biases which yields a
# [batch_size, num_true] tensor of true_logits.
dots_as_matrix = array_ops.reshape(row_wise_dots,
array_ops.concat([[-1], dim], 0))
true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
true_b = array_ops.reshape(true_b, [-1, num_true])
true_logits += true_b
sampled_logits += sampled_b
if remove_accidental_hits:
acc_hits = candidate_sampling_ops.compute_accidental_hits(
labels, sampled, num_true=num_true)
acc_indices, acc_ids, acc_weights = acc_hits
# This is how SparseToDense expects the indices.
acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
acc_ids_2d_int32 = array_ops.reshape(
math_ops.cast(acc_ids, dtypes.int32), [-1, 1])
sparse_indices = array_ops.concat([acc_indices_2d, acc_ids_2d_int32], 1,
"sparse_indices")
# Create sampled_logits_shape = [batch_size, num_sampled]
sampled_logits_shape = array_ops.concat(
[array_ops.shape(labels)[:1],
array_ops.expand_dims(num_sampled, 0)], 0)
if sampled_logits.dtype != acc_weights.dtype:
acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype)
sampled_logits += gen_sparse_ops.sparse_to_dense(
sparse_indices,
sampled_logits_shape,
acc_weights,
default_value=0.0,
validate_indices=False)
if subtract_log_q:
# Subtract log of Q(l), prior probability that l appears in sampled.
true_logits -= math_ops.log(true_expected_count)
sampled_logits -= math_ops.log(sampled_expected_count)
# Construct output logits and labels. The true labels/logits start at col 0.
out_logits = array_ops.concat([true_logits, sampled_logits], 1)
# true_logits is a float tensor, ones_like(true_logits) is a float
# tensor of ones. We then divide by num_true to ensure the per-example
# labels sum to 1.0, i.e. form a proper probability distribution.
out_labels = array_ops.concat([
array_ops.ones_like(true_logits) / num_true,
array_ops.zeros_like(sampled_logits)
], 1)
return out_logits, out_labels
@tf_export("nn.nce_loss", v1=[])
@dispatch.add_dispatch_support
def nce_loss_v2(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=1,
sampled_values=None,
remove_accidental_hits=False,
name="nce_loss"):
"""Computes and returns the noise-contrastive estimation training loss.
See [Noise-contrastive estimation: A new estimation principle for
unnormalized statistical
models](http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf).
  Also see our [Candidate Sampling Algorithms
  Reference](https://www.tensorflow.org/extras/candidate_sampling.pdf).
A common use case is to use this method for training, and calculate the full
sigmoid loss for evaluation or inference as in the following example:
```python
if mode == "train":
loss = tf.nn.nce_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
...)
elif mode == "eval":
logits = tf.matmul(inputs, tf.transpose(weights))
logits = tf.nn.bias_add(logits, biases)
labels_one_hot = tf.one_hot(labels, n_classes)
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels_one_hot,
logits=logits)
loss = tf.reduce_sum(loss, axis=1)
```
  Note: when doing embedding lookup on `weights` and `bias`, "div" partition
  strategy will be used. Support for other partition strategies will be added
  later.
Note: By default this uses a log-uniform (Zipfian) distribution for sampling,
so your labels must be sorted in order of decreasing frequency to achieve
good results. For more details, see
`tf.random.log_uniform_candidate_sampler`.
Note: In the case where `num_true` > 1, we assign to each target class
the target probability 1 / `num_true` so that the target probabilities
sum to 1 per-example.
Note: It would be useful to allow a variable number of target classes per
example. We hope to provide this functionality in a future release.
For now, if you have a variable number of target classes, you can pad them
out to a constant number by either repeating them or by padding
with an otherwise unused class.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape [num_classes,
dim]. The (possibly-partitioned) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The
target classes.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of
the input network.
num_sampled: An `int`. The number of negative classes to randomly sample
per batch. This single sample of negative classes is evaluated for each
element in the batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
where a sampled class equals one of the target classes. If set to `True`,
this is a "Sampled Logistic" loss instead of NCE, and we are learning to
generate log-odds instead of log probabilities. See our [Candidate
Sampling Algorithms Reference]
(https://www.tensorflow.org/extras/candidate_sampling.pdf). Default is
False.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example NCE losses.
"""
# TODO(yuefengz): get partition_strategy from either variables or distribution
# strategies.
return nce_loss(
weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=num_true,
sampled_values=sampled_values,
remove_accidental_hits=remove_accidental_hits,
partition_strategy="div",
name=name)
@tf_export(v1=["nn.nce_loss"])
@dispatch.add_dispatch_support
def nce_loss(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=1,
sampled_values=None,
remove_accidental_hits=False,
partition_strategy="mod",
name="nce_loss"):
"""Computes and returns the noise-contrastive estimation training loss.
A common use case is to use this method for training, and calculate the full
sigmoid loss for evaluation or inference. In this case, you must set
`partition_strategy="div"` for the two losses to be consistent, as in the
following example:
```python
if mode == "train":
loss = tf.nn.nce_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
...,
partition_strategy="div")
elif mode == "eval":
logits = tf.matmul(inputs, tf.transpose(weights))
logits = tf.nn.bias_add(logits, biases)
labels_one_hot = tf.one_hot(labels, n_classes)
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels_one_hot,
logits=logits)
loss = tf.reduce_sum(loss, axis=1)
```
Note: By default this uses a log-uniform (Zipfian) distribution for sampling,
so your labels must be sorted in order of decreasing frequency to achieve
good results. For more details, see
`tf.random.log_uniform_candidate_sampler`.
Note: In the case where `num_true` > 1, we assign to each target class
the target probability 1 / `num_true` so that the target probabilities
sum to 1 per-example.
Note: It would be useful to allow a variable number of target classes per
example. We hope to provide this functionality in a future release.
For now, if you have a variable number of target classes, you can pad them
out to a constant number by either repeating them or by padding
with an otherwise unused class.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
[num_classes, dim]. The (possibly-partitioned) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
num_sampled: An `int`. The number of negative classes to randomly sample
per batch. This single sample of negative classes is evaluated for each
element in the batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
where a sampled class equals one of the target classes. If set to
`True`, this is a "Sampled Logistic" loss instead of NCE, and we are
learning to generate log-odds instead of log probabilities. See
our Candidate Sampling Algorithms Reference
([pdf](https://www.tensorflow.org/extras/candidate_sampling.pdf)).
Default is False.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example NCE losses.
References:
Noise-contrastive estimation - A new estimation principle for unnormalized
statistical models:
[Gutmann et al., 2010](http://proceedings.mlr.press/v9/gutmann10a)
([pdf](http://proceedings.mlr.press/v9/gutmann10a/gutmann10a.pdf))
"""
logits, labels = _compute_sampled_logits(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
num_sampled=num_sampled,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_values,
subtract_log_q=True,
remove_accidental_hits=remove_accidental_hits,
partition_strategy=partition_strategy,
name=name)
sampled_losses = sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits, name="sampled_losses")
# sampled_losses is batch_size x {true_loss, sampled_losses...}
# We sum out true and sampled losses.
return _sum_rows(sampled_losses)
@tf_export("nn.sampled_softmax_loss", v1=[])
@dispatch.add_dispatch_support
def sampled_softmax_loss_v2(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=1,
sampled_values=None,
remove_accidental_hits=True,
seed=None,
name="sampled_softmax_loss"):
"""Computes and returns the sampled softmax training loss.
This is a faster way to train a softmax classifier over a huge number of
classes.
This operation is for training only. It is generally an underestimate of
the full softmax loss.
  A common use case is to use this method for training, and calculate the full
  softmax loss for evaluation or inference as in the following example:
```python
if mode == "train":
loss = tf.nn.sampled_softmax_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
...)
elif mode == "eval":
logits = tf.matmul(inputs, tf.transpose(weights))
logits = tf.nn.bias_add(logits, biases)
labels_one_hot = tf.one_hot(labels, n_classes)
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=labels_one_hot,
logits=logits)
```
See our [Candidate Sampling Algorithms Reference]
(https://www.tensorflow.org/extras/candidate_sampling.pdf)
Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.
  Note: when doing embedding lookup on `weights` and `bias`, "div" partition
  strategy will be used. Support for other partition strategies will be added
  later.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape [num_classes,
dim]. The (possibly-sharded) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The
target classes. Note that this format differs from the `labels` argument
of `nn.softmax_cross_entropy_with_logits`.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of
the input network.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
      where a sampled class equals one of the target classes. Default is True.
    seed: random seed for candidate sampling. Defaults to None, which doesn't
      set the op-level random seed for candidate sampling.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example sampled softmax losses.
"""
return sampled_softmax_loss(
weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=num_true,
sampled_values=sampled_values,
remove_accidental_hits=remove_accidental_hits,
partition_strategy="div",
name=name,
seed=seed)
@tf_export(v1=["nn.sampled_softmax_loss"])
@dispatch.add_dispatch_support
def sampled_softmax_loss(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=1,
sampled_values=None,
remove_accidental_hits=True,
partition_strategy="mod",
name="sampled_softmax_loss",
seed=None):
"""Computes and returns the sampled softmax training loss.
This is a faster way to train a softmax classifier over a huge number of
classes.
This operation is for training only. It is generally an underestimate of
the full softmax loss.
A common use case is to use this method for training, and calculate the full
softmax loss for evaluation or inference. In this case, you must set
`partition_strategy="div"` for the two losses to be consistent, as in the
following example:
```python
if mode == "train":
loss = tf.nn.sampled_softmax_loss(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
...,
partition_strategy="div")
elif mode == "eval":
logits = tf.matmul(inputs, tf.transpose(weights))
logits = tf.nn.bias_add(logits, biases)
labels_one_hot = tf.one_hot(labels, n_classes)
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=labels_one_hot,
logits=logits)
```
See our Candidate Sampling Algorithms Reference
([pdf](https://www.tensorflow.org/extras/candidate_sampling.pdf)).
Also see Section 3 of (Jean et al., 2014) for the math.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
[num_classes, dim]. The (possibly-sharded) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes. Note that this format differs from
the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
remove_accidental_hits: A `bool`. whether to remove "accidental hits"
where a sampled class equals one of the target classes. Default is
True.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
seed: random seed for candidate sampling. Default to None, which doesn't set
the op-level random seed for candidate sampling.
Returns:
A `batch_size` 1-D tensor of per-example sampled softmax losses.
References:
On Using Very Large Target Vocabulary for Neural Machine Translation:
[Jean et al., 2014]
(https://aclanthology.coli.uni-saarland.de/papers/P15-1001/p15-1001)
([pdf](http://aclweb.org/anthology/P15-1001))
"""
logits, labels = _compute_sampled_logits(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
num_sampled=num_sampled,
num_classes=num_classes,
num_true=num_true,
sampled_values=sampled_values,
subtract_log_q=True,
remove_accidental_hits=remove_accidental_hits,
partition_strategy=partition_strategy,
name=name,
seed=seed)
labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")
sampled_losses = nn_ops.softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits)
# sampled_losses is a [batch_size] tensor.
return sampled_losses
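# Illustrative usage sketch (added for this edit; not part of the original
# module). The variable names and shapes below are assumptions, showing how
# the v1 sampled softmax loss is typically reduced to a scalar training loss.
# It relies on the module's existing `array_ops`/`math_ops` imports.
def _sampled_softmax_usage_sketch(weights, biases, hidden, label_ids,
                                  num_classes, num_sampled=64):
  """Illustrative only. `hidden` is `[batch_size, dim]`; `label_ids` is a
  `[batch_size]` int64 tensor of class ids."""
  labels = array_ops.reshape(label_ids, [-1, 1])  # [batch_size, num_true=1]
  per_example = sampled_softmax_loss(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=hidden,
      num_sampled=num_sampled,
      num_classes=num_classes,
      partition_strategy="div")
  return math_ops.reduce_mean(per_example)  # scalar loss for the optimizer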
| annarev/tensorflow | tensorflow/python/ops/nn_impl.py | Python | apache-2.0 | 99,524 | ["Gaussian"] | f1c34b4851a5cf723332f966ad04547a1fced25ee0f4d6591bcb17d6f559c446 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import copy
class ParameterInfo(object):
"""
Holds the information for a parameter
"""
def __init__(self, parent, name):
self.value = ""
self.user_added = False
self.name = name
self.required = False
self.cpp_type = "string"
self.basic_type = "String"
self.group_name = "Main"
self.description = ""
self.default = ""
self.parent = parent
self.comments = ""
self.options = []
self.set_in_input_file = False
def setFromData(self, data):
"""
        Sets this object's attributes from a JSON dict.
Input:
data[dict]: This is the dict description of the parameter as read from the JSON dump.
"""
self.default = data.get("default", "")
if self.default is None:
self.default = ""
self.cpp_type = data["cpp_type"]
self.basic_type = data["basic_type"]
self.description = data["description"]
self.group_name = data["group_name"]
if not self.group_name:
self.group_name = "Main"
self.required = data["required"]
self.name = data["name"]
self.options = data.get("options", "")
if self.options:
self.options = self.options.strip().split()
else:
self.options = []
if self.cpp_type == "bool":
if self.default == "0":
self.default = "false"
elif self.default == "1":
self.default = "true"
elif not self.default:
self.default = "false"
self.value = self.default
def copy(self, parent):
"""
Copies this ParameterInfo to a new one.
Input:
parent[BlockInfo]: Parent of the new ParameterInfo
Return:
ParameterInfo: The copied parameter
"""
new = copy.copy(self)
new.parent = parent
new.comments = ""
return new
def needsQuotes(self):
"""
Check whether we need to write out quotes around this parameter value.
Return:
bool
"""
return ( self.isVectorType() or
self.user_added or
("basic_string" in self.cpp_type and self.name == "value") or
("std::string" in self.cpp_type and self.name == "value") or
self.cpp_type == "FunctionExpression" or
' ' in self.value or
';' in self.value or
'=' in self.value or
'\n' in self.value
)
def isVectorType(self):
"""
Check whether this is a vector type.
Return:
bool
"""
return self.basic_type.startswith("Array")
def inputFileValue(self):
"""
Return the string that should be written to the input file.
        Some values need single quotes while others do not.
"""
if self.needsQuotes() and (self.value or self.user_added):
return "'%s'" % self.value
else:
return str(self.value)
def hitType(self):
"""
Return the Hit Field type
"""
hit_map = {"Boolean": "Bool", "Real": "Float", "Integer": "Int"}
return hit_map.get(self.basic_type, "String")
def toolTip(self):
return self.description + "\nDefault: %s" % self.default
def hasChanged(self):
return self.value != self.default or self.comments
def dump(self, o, indent=0, sep=' '):
o.write("%sName: %s\n" % (indent*sep, self.name))
o.write("%sValue: %s\n" % (indent*sep, self.value))
o.write("%sDefault: %s\n" % (indent*sep, self.default))
o.write("%sUser added: %s\n" % (indent*sep, self.user_added))
o.write("%sRequired: %s\n" % (indent*sep, self.required))
o.write("%sCpp_type: %s\n" % (indent*sep, self.cpp_type))
o.write("%sGroup: %s\n" % (indent*sep, self.group_name))
o.write("%sDescription: %s\n" % (indent*sep, self.description))
o.write("%sComments: %s\n" % (indent*sep, self.comments))
| nuclear-wizard/moose | python/peacock/Input/ParameterInfo.py | Python | lgpl-2.1 | 4,414 | ["MOOSE"] | 63e5a1a8382245ef0f5dd70720245f09e59966c6a68e8a5ff2c0810fcc62fa6f |
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2013, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from setuptools import setup
from glob import glob
__version__ = "0.1.0"
classes = """
Development Status :: 3 - Alpha
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Software Development :: Libraries :: Application Frameworks
Topic :: Software Development :: Libraries :: Python Modules
Programming Language :: Python
Programming Language :: Python :: 3.5
Programming Language :: Python :: Implementation :: CPython
Operating System :: POSIX :: Linux
Operating System :: MacOS :: MacOS X
"""
long_description = ("Deblur: a greedy deconvolution algorithm based on known "
"read error profiles")
classifiers = [s.strip() for s in classes.split('\n') if s]
setup(name='deblur',
version=__version__,
long_description=long_description,
license="BSD",
description='Deblur',
author="Deblur development team",
author_email="amnonim@gmail.com",
url='https://github.com/biocore/deblur',
test_suite='nose.collector',
packages=['deblur', 'deblur.support_files', 'deblur.test'],
package_data={'deblur.support_files': ['artifacts.fa', '88_otus.fasta']},
scripts=glob('scripts/*'),
extras_require={'test': ["nose >= 0.10.1", "pep8"],
'doc': ["Sphinx >= 1.2.2", "sphinx-bootstrap-theme"]},
install_requires=['click >= 6', 'numpy >= 1.7',
'scikit-bio >= 0.5.0, < 0.6.0',
'biom-format >= 2.1.3, < 2.2.0',
'h5py >= 2.2.0', 'scipy >= 0.15.1'],
classifiers=classifiers
)
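# Installation sketch (added; not part of the original file):
#   pip install .             # core package
#   pip install ".[test]"     # adds the nose/pep8 test extras
#   pip install ".[doc]"      # adds the Sphinx documentation extras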
| amnona/deblur | setup.py | Python | bsd-3-clause | 2,025 | ["scikit-bio"] | 6f19fd4410941bbbf14352894f039795a9fc7d1970fe0ae64ede227c2f040b71 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaWrapper
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel']
class ClusteringSummary(JavaWrapper):
"""
.. note:: Experimental
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
class GaussianMixtureModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
HasProbabilityCol, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
specifying each's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. note:: For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001,
... maxIter=10, seed=10)
>>> model = gm.fit(df)
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> weights = model.weights
>>> len(weights)
3
>>> model.gaussiansDF.show()
+--------------------+--------------------+
| mean| cov|
+--------------------+--------------------+
|[0.82500000140229...|0.005625000000006...|
|[-0.4777098016092...|0.167969502720916...|
|[-0.4472625243352...|0.167304119758233...|
+--------------------+--------------------+
...
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[4].prediction == rows[5].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussiansDF.show()
+--------------------+--------------------+
| mean| cov|
+--------------------+--------------------+
|[0.82500000140229...|0.005625000000006...|
|[-0.4777098016092...|0.167969502720916...|
|[-0.4472625243352...|0.167304119758233...|
+--------------------+--------------------+
...
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
self._setDefault(k=2, tol=0.01, maxIter=100)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
Sets params for GaussianMixture.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
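# Illustrative sketch (added; not part of the original module): reading the
# fitted mixture parameters back out of a GaussianMixtureModel via the
# documented `weights` and `gaussiansDF` properties.
def _gmm_params_sketch(model):
    # gaussiansDF has one row per component, with `mean` (Vector) and
    # `cov` (Matrix) columns; `weights` aligns with the row order.
    rows = model.gaussiansDF.collect()
    return [(w, row["mean"], row["cov"])
            for w, row in zip(model.weights, rows)]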
class GaussianMixtureSummary(ClusteringSummary):
"""
.. note:: Experimental
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
class KMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Summary of KMeans.
.. versionadded:: 2.1.0
"""
pass
class KMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Return the K-means cost (sum of squared distances of points to their nearest center)
for this model on the given data.
"""
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class KMeans(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> kmeans = KMeans(k=2, seed=1)
>>> model = kmeans.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 1.5.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None)
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None)
Sets params for KMeans.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
class BisectingKMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
"""
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class BisectingKMeans(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasSeed,
JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
    If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
    clusters, larger clusters get higher priority.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> model = bkm.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0):
"""
__init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0)
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0):
"""
setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0)
Sets params for BisectingKMeans.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
pass
@inherit_doc
class LDAModel(JavaModel):
"""
Latent Dirichlet Allocation (LDA) model.
    This abstraction permits different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
the Expectation-Maximization ("em") `optimizer`, then this method could involve
collecting a large amount of data to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
        Calculates an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
return LocalLDAModel(self._call_java("toLocal"))
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes:
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
@since("2.0.0")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. note:: Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
        :return: List of checkpoint files from training
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an el
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> model = lda.fit(df)
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an "
                          "exponential decay rate. This should be between (0.5, 1.0] to "
                          "guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
topicConcentration = Param(Params._dummy(), "topicConcentration",
"Concentration parameter (commonly named \"beta\" or \"eta\") for "
"the prior placed on topic' distributions over terms.",
typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
@keyword_only
def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
Sets params for LDA.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
        Currently only supports 'em' and 'online'.
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
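# Illustrative end-to-end sketch (added; not part of the original module).
# Assumes an active SparkSession `spark`; the tiny two-document corpus is a
# hypothetical example.
def _lda_pipeline_sketch(spark):
    from pyspark.ml.linalg import Vectors
    df = spark.createDataFrame(
        [[1, Vectors.dense([0.0, 1.0])], [2, Vectors.dense([1.0, 0.0])]],
        ["id", "features"])
    # The online optimizer yields a LocalLDAModel; "em" would yield a
    # DistributedLDAModel (see _create_model above).
    model = LDA(k=2, maxIter=5, optimizer="online", seed=1).fit(df)
    return model.describeTopics(maxTermsPerTopic=2)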
if __name__ == "__main__":
import doctest
import pyspark.ml.clustering
from pyspark.sql import SparkSession
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
exit(-1)
| alec-heif/MIT-Thesis | spark-bin/python/pyspark/ml/clustering.py | Python | mit | 41,715 | ["Gaussian"] | ff86d5ed25c736bcfe8085da2bb5ced7c83031f1b6097c6bbb6d5cf394977c4a |
"""
Execute each notebook as a test, reporting an error if any cell throws an exception.
Adapted from https://gist.github.com/minrk/2620876.
"""
from __future__ import print_function
import os
import sys
import socket
from distutils.spawn import find_executable as _find_executable
import pytest
try:
import nbformat
from jupyter_client import KernelManager
except ImportError:
    pytest.skip("Skipping: nbformat/jupyter_client not available", allow_module_level=True)
from six.moves.queue import Empty
FLAKEY_LIST = ['centroids.ipynb', 'native-contact.ipynb', 'hbonds.ipynb']
SPARTA_PLUS = ['sparta+', 'SPARTA+', 'SPARTA+.linux']
TIMEOUT = 60 # seconds
test_dir = os.path.dirname(os.path.abspath(__file__))
examples = [pytest.param(fn, marks=pytest.mark.flaky) if fn in FLAKEY_LIST else fn
for fn in os.listdir(test_dir) if fn.endswith('.ipynb')]
def is_network_connected():
try:
# connect to the host -- tells us if the host is actually
# reachable
socket.create_connection(("1.1.1.1", 53))
return True
except OSError:
pass
return False
def find_executable(names):
for possible in names:
result = _find_executable(possible)
if result is not None:
return result
return None
@pytest.fixture(params=examples)
def example_fn(request):
if 'openmm' in request.param:
try:
from simtk.openmm import app
except ImportError:
pytest.skip("Openmm required for example notebook `{}`".format(request.param))
if "nmr" in request.param:
if find_executable(SPARTA_PLUS) is None:
pytest.skip("Sparta+ not found for example notebook `{}`".format(request.param))
if not is_network_connected():
if any(x in request.param for x in ("native-contact", "hbonds")):
pytest.skip("Network access required")
cwd = os.path.abspath('.')
os.chdir(test_dir)
yield request.param
os.chdir(cwd)
def test_example_notebook(example_fn):
with open(example_fn) as f:
nb = nbformat.reads(f.read(), nbformat.NO_CONVERT)
run_notebook(nb)
def run_notebook(nb):
km = KernelManager()
km.start_kernel(stderr=open(os.devnull, 'w'))
kc = km.client()
kc.start_channels()
# simple ping:
kc.execute("pass")
kc.get_shell_msg()
failures = 0
for cell in nb.cells:
if cell.cell_type != 'code':
continue
kc.execute(cell.source)
try:
# wait for finish, w/ timeout
reply = kc.get_shell_msg(timeout=TIMEOUT)['content']
except Empty:
raise Exception(
'Timeout (%.1f) when executing the following %s cell: "%s"' %
(TIMEOUT, cell.cell_type, cell.source.strip()))
if reply['status'] == 'error':
failures += 1
print("\nFAILURE:", file=sys.stderr)
print('\n'.join(reply['traceback']), file=sys.stderr)
print(file=sys.stderr)
kc.stop_channels()
km.shutdown_kernel()
del km
    if failures > 0:
        raise Exception("%d notebook cell(s) raised an exception" % failures)
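# Manual invocation sketch (added; not part of the original file): run one
# notebook outside pytest, e.g. `python test_examples.py centroids.ipynb`.
# The filename argument is a hypothetical example.
if __name__ == '__main__':
    with open(os.path.join(test_dir, sys.argv[1])) as f:
        run_notebook(nbformat.reads(f.read(), nbformat.NO_CONVERT))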
| rmcgibbo/mdtraj | examples/test_examples.py | Python | lgpl-2.1 | 3,093 | ["OpenMM"] | a8361e76faadd3ab409ccb6fd1a34e4ca97d4c6589d050cdad50c1a6a2b1000f |
"""
A simple VTK widget for PyQt or PySide.
See http://www.trolltech.com for Qt documentation,
http://www.riverbankcomputing.co.uk for PyQt, and
http://pyside.github.io for PySide.
This class is based on the vtkGenericRenderWindowInteractor and is
therefore fairly powerful. It should also play nicely with the
vtk3DWidget code.
Created by Prabhu Ramachandran, May 2002
Based on David Gobbi's QVTKRenderWidget.py
Changes by Gerard Vermeulen Feb. 2003
Win32 support.
Changes by Gerard Vermeulen, May 2003
Bug fixes and better integration with the Qt framework.
Changes by Phil Thompson, Nov. 2006
Ported to PyQt v4.
Added support for wheel events.
Changes by Phil Thompson, Oct. 2007
Bug fixes.
Changes by Phil Thompson, Mar. 2008
Added cursor support.
Changes by Rodrigo Mologni, Sep. 2013 (Credit to Daniele Esposti)
Bug fix to PySide: Converts PyCObject to void pointer.
Changes by Greg Schussman, Aug. 2014
The keyPressEvent function now passes keysym instead of None.
Changes by Alex Tsui, Apr. 2015
Port from PyQt4 to PyQt5.
Changes by Fabian Wenzel, Jan. 2016
Support for Python3
"""
import sys
from pyface.qt import qt_api
if qt_api == 'pyqt':
    PyQtImpl = "PyQt4"
elif qt_api == 'pyqt5':
    PyQtImpl = "PyQt5"
else:
    PyQtImpl = "PySide"
import vtk
from tvtk import messenger
if PyQtImpl == "PyQt5":
from PyQt5.QtWidgets import QWidget, QSizePolicy, QApplication
from PyQt5.QtGui import QWheelEvent
from PyQt5.QtCore import Qt, QTimer, QObject, QSize, QEvent
elif PyQtImpl == "PyQt4":
from PyQt4.QtGui import QWidget, QSizePolicy, QApplication, QWheelEvent
from PyQt4.QtCore import Qt, QTimer, QObject, QSize, QEvent
elif PyQtImpl == "PySide":
from PySide.QtGui import QWidget, QSizePolicy, QApplication, QWheelEvent
from PySide.QtCore import Qt, QTimer, QObject, QSize, QEvent
else:
raise ImportError("Unknown PyQt implementation " + repr(PyQtImpl))
class QVTKRenderWindowInteractor(QWidget):
""" A QVTKRenderWindowInteractor for Python and Qt. Uses a
vtkGenericRenderWindowInteractor to handle the interactions. Use
GetRenderWindow() to get the vtkRenderWindow. Create with the
keyword stereo=1 in order to generate a stereo-capable window.
The user interface is summarized in vtkInteractorStyle.h:
- Keypress j / Keypress t: toggle between joystick (position
sensitive) and trackball (motion sensitive) styles. In joystick
style, motion occurs continuously as long as a mouse button is
pressed. In trackball style, motion occurs when the mouse button
is pressed and the mouse pointer moves.
- Keypress c / Keypress o: toggle between camera and object
(actor) modes. In camera mode, mouse events affect the camera
position and focal point. In object mode, mouse events affect
the actor that is under the mouse pointer.
- Button 1: rotate the camera around its focal point (if camera
mode) or rotate the actor around its origin (if actor mode). The
rotation is in the direction defined from the center of the
renderer's viewport towards the mouse position. In joystick mode,
the magnitude of the rotation is determined by the distance the
mouse is from the center of the render window.
- Button 2: pan the camera (if camera mode) or translate the actor
(if object mode). In joystick mode, the direction of pan or
translation is from the center of the viewport towards the mouse
position. In trackball mode, the direction of motion is the
direction the mouse moves. (Note: with 2-button mice, pan is
defined as <Shift>-Button 1.)
- Button 3: zoom the camera (if camera mode) or scale the actor
(if object mode). Zoom in/increase scale if the mouse position is
in the top half of the viewport; zoom out/decrease scale if the
mouse position is in the bottom half. In joystick mode, the amount
of zoom is controlled by the distance of the mouse pointer from
the horizontal centerline of the window.
- Keypress 3: toggle the render window into and out of stereo
mode. By default, red-blue stereo pairs are created. Some systems
support Crystal Eyes LCD stereo glasses; you have to invoke
SetStereoTypeToCrystalEyes() on the rendering window. Note: to
use stereo you also need to pass a stereo=1 keyword argument to
the constructor.
- Keypress e: exit the application.
- Keypress f: fly to the picked point
- Keypress p: perform a pick operation. The render window interactor
has an internal instance of vtkCellPicker that it uses to pick.
- Keypress r: reset the camera view along the current view
direction. Centers the actors and moves the camera so that all actors
are visible.
- Keypress s: modify the representation of all actors so that they
are surfaces.
- Keypress u: invoke the user-defined function. Typically, this
keypress will bring up an interactor that you can type commands in.
- Keypress w: modify the representation of all actors so that they
are wireframe.
"""
# Map between VTK and Qt cursors.
_CURSOR_MAP = {
0: Qt.ArrowCursor, # VTK_CURSOR_DEFAULT
1: Qt.ArrowCursor, # VTK_CURSOR_ARROW
2: Qt.SizeBDiagCursor, # VTK_CURSOR_SIZENE
3: Qt.SizeFDiagCursor, # VTK_CURSOR_SIZENWSE
4: Qt.SizeBDiagCursor, # VTK_CURSOR_SIZESW
5: Qt.SizeFDiagCursor, # VTK_CURSOR_SIZESE
6: Qt.SizeVerCursor, # VTK_CURSOR_SIZENS
7: Qt.SizeHorCursor, # VTK_CURSOR_SIZEWE
8: Qt.SizeAllCursor, # VTK_CURSOR_SIZEALL
9: Qt.PointingHandCursor, # VTK_CURSOR_HAND
10: Qt.CrossCursor, # VTK_CURSOR_CROSSHAIR
}
def __init__(self, parent=None, wflags=Qt.WindowFlags(), **kw):
# the current button
self._ActiveButton = Qt.NoButton
# private attributes
self.__oldFocus = None
self.__saveX = 0
self.__saveY = 0
self.__saveModifiers = Qt.NoModifier
self.__saveButtons = Qt.NoButton
# do special handling of some keywords:
# stereo, rw
try:
stereo = bool(kw['stereo'])
except KeyError:
stereo = False
try:
rw = kw['rw']
except KeyError:
rw = None
# create qt-level widget
QWidget.__init__(self, parent, wflags|Qt.MSWindowsOwnDC)
if rw: # user-supplied render window
self._RenderWindow = rw
else:
self._RenderWindow = vtk.vtkRenderWindow()
wid = self._get_win_id()
self._RenderWindow.SetWindowInfo(wid)
self._should_set_parent_info = (sys.platform == 'win32')
if stereo: # stereo mode
self._RenderWindow.StereoCapableWindowOn()
self._RenderWindow.SetStereoTypeToCrystalEyes()
try:
self._Iren = kw['iren']
except KeyError:
self._Iren = vtk.vtkGenericRenderWindowInteractor()
self._Iren.SetRenderWindow(self._RenderWindow)
# do all the necessary qt setup
self.setAttribute(Qt.WA_OpaquePaintEvent)
self.setAttribute(Qt.WA_PaintOnScreen)
self.setMouseTracking(True) # get all mouse events
self.setFocusPolicy(Qt.WheelFocus)
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
self._Timer = QTimer(self)
self._Timer.timeout.connect(self.TimerEvent)
# add wheel timer to fix scrolling issue with trackpad
self.wheel_timer = QTimer()
self.wheel_timer.setSingleShot(True)
self.wheel_timer.setInterval(25)
self.wheel_timer.timeout.connect(self._emit_wheel_event)
self.wheel_accumulator = 0
self._saved_wheel_event_info = ()
self._Iren.AddObserver('CreateTimerEvent', messenger.send)
messenger.connect(self._Iren, 'CreateTimerEvent', self.CreateTimer)
self._Iren.AddObserver('DestroyTimerEvent', messenger.send)
messenger.connect(self._Iren, 'DestroyTimerEvent', self.DestroyTimer)
self._RenderWindow.AddObserver('CursorChangedEvent', messenger.send)
messenger.connect(self._RenderWindow, 'CursorChangedEvent',
self.CursorChangedEvent)
# Create a hidden child widget and connect its destroyed signal to its
# parent ``Finalize`` slot. The hidden children will be destroyed
# before its parent thus allowing cleanup of VTK elements.
self._hidden = QWidget(self)
self._hidden.hide()
self._hidden.destroyed.connect(self.Finalize)
def __getattr__(self, attr):
"""Makes the object behave like a vtkGenericRenderWindowInteractor"""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError(
self.__class__.__name__ + " has no attribute named " + attr
)
def _get_win_id(self):
WId = self.winId()
# Python2
if type(WId).__name__ == 'PyCObject':
from ctypes import pythonapi, c_void_p, py_object
pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
WId = pythonapi.PyCObject_AsVoidPtr(WId)
# Python3
elif type(WId).__name__ == 'PyCapsule':
from ctypes import pythonapi, c_void_p, py_object, c_char_p
pythonapi.PyCapsule_GetName.restype = c_char_p
pythonapi.PyCapsule_GetName.argtypes = [py_object]
name = pythonapi.PyCapsule_GetName(WId)
pythonapi.PyCapsule_GetPointer.restype = c_void_p
pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]
WId = pythonapi.PyCapsule_GetPointer(WId, name)
return str(WId)
def Finalize(self):
'''
Call internal cleanup method on VTK objects
'''
self._RenderWindow.Finalize()
def CreateTimer(self, obj, evt):
self._Timer.start(10)
def DestroyTimer(self, obj, evt):
self._Timer.stop()
return 1
def TimerEvent(self):
self._Iren.TimerEvent()
def CursorChangedEvent(self, obj, evt):
"""Called when the CursorChangedEvent fires on the render window."""
# This indirection is needed since when the event fires, the current
# cursor is not yet set so we defer this by which time the current
# cursor should have been set.
QTimer.singleShot(0, self.ShowCursor)
def HideCursor(self):
"""Hides the cursor."""
self.setCursor(Qt.BlankCursor)
def ShowCursor(self):
"""Shows the cursor."""
vtk_cursor = self._Iren.GetRenderWindow().GetCurrentCursor()
qt_cursor = self._CURSOR_MAP.get(vtk_cursor, Qt.ArrowCursor)
self.setCursor(qt_cursor)
def closeEvent(self, evt):
self.Finalize()
def sizeHint(self):
return QSize(400, 400)
def paintEngine(self):
return None
def paintEvent(self, ev):
self._RenderWindow.Render()
def resizeEvent(self, ev):
if self._should_set_parent_info:
# Set the window info and parent info on every resize.
# vtkWin32OpenGLRenderWindow will render using incorrect offsets if
# the parent info is not given to it because it assumes that it
# needs to make room for the title bar.
winid = self._get_win_id()
self._RenderWindow.SetWindowInfo(winid)
parent = self.parent()
if parent is not None:
self._RenderWindow.SetParentInfo(winid)
else:
self._RenderWindow.SetParentInfo('')
w = self.width()
h = self.height()
self._RenderWindow.SetSize(w, h)
self._Iren.SetSize(w, h)
self._Iren.ConfigureEvent()
self.update()
def _GetCtrlShift(self, ev):
ctrl = shift = False
if hasattr(ev, 'modifiers'):
if ev.modifiers() & Qt.ShiftModifier:
shift = True
if ev.modifiers() & Qt.ControlModifier:
ctrl = True
else:
if self.__saveModifiers & Qt.ShiftModifier:
shift = True
if self.__saveModifiers & Qt.ControlModifier:
ctrl = True
return ctrl, shift
def enterEvent(self, ev):
if not self.hasFocus():
self.__oldFocus = self.focusWidget()
self.setFocus()
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, chr(0), 0, None)
self._Iren.EnterEvent()
def leaveEvent(self, ev):
if self.__saveButtons == Qt.NoButton and self.__oldFocus:
self.__oldFocus.setFocus()
self.__oldFocus = None
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, chr(0), 0, None)
self._Iren.LeaveEvent()
def mousePressEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
repeat = 0
if ev.type() == QEvent.MouseButtonDblClick:
repeat = 1
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), repeat, None)
self._ActiveButton = ev.button()
if self._ActiveButton == Qt.LeftButton:
self._Iren.LeftButtonPressEvent()
elif self._ActiveButton == Qt.RightButton:
self._Iren.RightButtonPressEvent()
elif self._ActiveButton == Qt.MidButton:
self._Iren.MiddleButtonPressEvent()
def mouseReleaseEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), 0, None)
if self._ActiveButton == Qt.LeftButton:
self._Iren.LeftButtonReleaseEvent()
elif self._ActiveButton == Qt.RightButton:
self._Iren.RightButtonReleaseEvent()
elif self._ActiveButton == Qt.MidButton:
self._Iren.MiddleButtonReleaseEvent()
def mouseMoveEvent(self, ev):
self.__saveModifiers = ev.modifiers()
self.__saveButtons = ev.buttons()
self.__saveX = ev.x()
self.__saveY = ev.y()
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), 0, None)
self._Iren.MouseMoveEvent()
def keyPressEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
key_sym = _qt_key_to_key_sym(ev.key())
if ev.key() < 256:
# Sometimes, the OS allows a chord (e.g. Alt-T) to generate
# a Unicode character outside of the 8-bit Latin-1 range. We will
# try to pass along Latin-1 characters unchanged, since VTK expects
# a single `char` byte. If not, we will try to pass on the root key
# of the chord (e.g. 'T' above).
if ev.text() and ev.text() <= u'\u00ff':
key = ev.text().encode('latin-1')
else:
# Has modifiers, but an ASCII key code.
key = chr(ev.key())
else:
key = chr(0)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, key, 0, key_sym)
self._Iren.KeyPressEvent()
self._Iren.CharEvent()
def keyReleaseEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
key_sym = _qt_key_to_key_sym(ev.key())
if ev.key() < 256:
if ev.text() and ev.text() <= u'\u00ff':
key = ev.text().encode('latin-1')
else:
# Has modifiers, but an ASCII key code.
key = chr(ev.key())
else:
key = chr(0)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, key, 0, None)
self._Iren.KeyReleaseEvent()
def wheelEvent(self, ev):
""" Reimplemented to work around scrolling bug in Mac.
Work around https://bugreports.qt-project.org/browse/QTBUG-22269.
Accumulate wheel events that are within a period of 25ms into a single
event. Changes in buttons or modifiers, while a scroll is going on,
are not handled, since they seem to be too much of a corner case to be
worth handling.
"""
self.wheel_accumulator += ev.delta()
self._saved_wheel_event_info = (
ev.pos(),
ev.globalPos(),
self.wheel_accumulator,
ev.buttons(),
ev.modifiers(),
ev.orientation()
)
ev.setAccepted(True)
if not self.wheel_timer.isActive():
self.wheel_timer.start()
def _emit_wheel_event(self):
ev = QWheelEvent(*self._saved_wheel_event_info)
if ev.delta() >= 0:
self._Iren.MouseWheelForwardEvent()
else:
self._Iren.MouseWheelBackwardEvent()
self.wheel_timer.stop()
self.wheel_accumulator = 0
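    # A minimal sketch of the timer wiring the two methods above assume. The
    # actual setup lives in __init__, outside this excerpt; the 25 ms interval
    # follows the wheelEvent docstring, and new-style signal syntax is used
    # here purely for illustration:
    #
    #     self.wheel_accumulator = 0
    #     self.wheel_timer = QTimer()
    #     self.wheel_timer.setSingleShot(True)
    #     self.wheel_timer.setInterval(25)
    #     self.wheel_timer.timeout.connect(self._emit_wheel_event)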
def GetRenderWindow(self):
return self._RenderWindow
def Render(self):
self.update()
def QVTKRenderWidgetConeExample():
"""A simple example that uses the QVTKRenderWindowInteractor class."""
    # Every Qt app needs a QApplication.
app = QApplication(['QVTKRenderWindowInteractor'])
# create the widget
widget = QVTKRenderWindowInteractor()
widget.Initialize()
widget.Start()
    # If you don't want the 'q' key to exit, comment this out.
widget.AddObserver("ExitEvent", lambda o, e, a=app: a.quit())
ren = vtk.vtkRenderer()
widget.GetRenderWindow().AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(8)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren.AddActor(coneActor)
# show the widget
widget.show()
# start event processing
app.exec_()
_keysyms = {
Qt.Key_Backspace: 'BackSpace',
Qt.Key_Tab: 'Tab',
Qt.Key_Backtab: 'Tab',
# Qt.Key_Clear : 'Clear',
Qt.Key_Return: 'Return',
Qt.Key_Enter: 'Return',
Qt.Key_Shift: 'Shift_L',
Qt.Key_Control: 'Control_L',
Qt.Key_Alt: 'Alt_L',
Qt.Key_Pause: 'Pause',
Qt.Key_CapsLock: 'Caps_Lock',
Qt.Key_Escape: 'Escape',
Qt.Key_Space: 'space',
# Qt.Key_Prior : 'Prior',
# Qt.Key_Next : 'Next',
Qt.Key_End: 'End',
Qt.Key_Home: 'Home',
Qt.Key_Left: 'Left',
Qt.Key_Up: 'Up',
Qt.Key_Right: 'Right',
Qt.Key_Down: 'Down',
Qt.Key_SysReq: 'Snapshot',
Qt.Key_Insert: 'Insert',
Qt.Key_Delete: 'Delete',
Qt.Key_Help: 'Help',
Qt.Key_0: '0',
Qt.Key_1: '1',
Qt.Key_2: '2',
Qt.Key_3: '3',
Qt.Key_4: '4',
Qt.Key_5: '5',
Qt.Key_6: '6',
Qt.Key_7: '7',
Qt.Key_8: '8',
Qt.Key_9: '9',
Qt.Key_A: 'a',
Qt.Key_B: 'b',
Qt.Key_C: 'c',
Qt.Key_D: 'd',
Qt.Key_E: 'e',
Qt.Key_F: 'f',
Qt.Key_G: 'g',
Qt.Key_H: 'h',
Qt.Key_I: 'i',
Qt.Key_J: 'j',
Qt.Key_K: 'k',
Qt.Key_L: 'l',
Qt.Key_M: 'm',
Qt.Key_N: 'n',
Qt.Key_O: 'o',
Qt.Key_P: 'p',
Qt.Key_Q: 'q',
Qt.Key_R: 'r',
Qt.Key_S: 's',
Qt.Key_T: 't',
Qt.Key_U: 'u',
Qt.Key_V: 'v',
Qt.Key_W: 'w',
Qt.Key_X: 'x',
Qt.Key_Y: 'y',
Qt.Key_Z: 'z',
Qt.Key_Asterisk: 'asterisk',
Qt.Key_Plus: 'plus',
Qt.Key_Minus: 'minus',
Qt.Key_Period: 'period',
Qt.Key_Slash: 'slash',
Qt.Key_F1: 'F1',
Qt.Key_F2: 'F2',
Qt.Key_F3: 'F3',
Qt.Key_F4: 'F4',
Qt.Key_F5: 'F5',
Qt.Key_F6: 'F6',
Qt.Key_F7: 'F7',
Qt.Key_F8: 'F8',
Qt.Key_F9: 'F9',
Qt.Key_F10: 'F10',
Qt.Key_F11: 'F11',
Qt.Key_F12: 'F12',
Qt.Key_F13: 'F13',
Qt.Key_F14: 'F14',
Qt.Key_F15: 'F15',
Qt.Key_F16: 'F16',
Qt.Key_F17: 'F17',
Qt.Key_F18: 'F18',
Qt.Key_F19: 'F19',
Qt.Key_F20: 'F20',
Qt.Key_F21: 'F21',
Qt.Key_F22: 'F22',
Qt.Key_F23: 'F23',
Qt.Key_F24: 'F24',
Qt.Key_NumLock: 'Num_Lock',
Qt.Key_ScrollLock: 'Scroll_Lock',
}
def _qt_key_to_key_sym(key):
""" Convert a Qt key into a vtk keysym.
This is essentially copied from the c++ implementation in
GUISupport/Qt/QVTKInteractorAdapter.cxx.
"""
if key not in _keysyms:
return None
return _keysyms[key]
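# For instance, _qt_key_to_key_sym(Qt.Key_Escape) returns 'Escape', while an
# unmapped key such as Qt.Key_Clear returns None.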
if __name__ == "__main__":
QVTKRenderWidgetConeExample()
| dmsurti/mayavi | tvtk/pyface/ui/qt4/QVTKRenderWindowInteractor.py | Python | bsd-3-clause | 21119 | ["CRYSTAL", "VTK"] | 23b693ec5af221b6c693613927a7c95e4a17f0c7620a3895f1ca0845e0a8b75d |
# -*- coding: utf-8 -*-
"""
Tools for trend analysis.
Author: R. Lombaert
"""
import os
import operator
from numpy import array
import numpy as np
from numpy.random import normal
from matplotlib import pyplot as plt
import cc.path
from cc.modeling.objects import Transition
from cc.plotting import Plotting2
from cc.data import Sed
def makeDiagnosticPlot(sg,molec,scaling=[],escaling=[],combine_water=0,\
edists=[],pfn_path=''):
'''
    Make a diagnostic plot for a series of stars, in which the integrated line
    strengths (Imb) of a given molecule's transitions are compared with their
    upper-level energies.
Line strengths are always scaled with distance squared. Additional scaling
can be requested.
Three plots are made: One with scaling versus distance and two with scaling
versus distance and the CO line strength of the J=15-14 and J=30-29 lines
respectively. The comparison with CO line strengths is not scaled except
with distance.
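    The rescaling applied here is I_scaled = I * (d / 100 pc)**2. For relative
    errors e_I on the line strength and e_d on the distance, the relative
    uncertainty on I_scaled is sqrt(e_I**2 + 4*e_d**2), since the distance
    enters squared.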
@param sg: The stellar models, in which the transitions have been matched
with integrated line strengths.
@type sg: list[Star()]
@param molec: The molecule for which this is done, shorthand notation.
@type molec: str
@keyword scaling: Scale the line strengths also with a keyword given here.
'MDOT_GAS', etc. Assumes a Star() object knows the
keyword.
(default:[])
@type scaling: list[string]
@keyword combine_water: Combine both ortho and para water in a single plot
(default: 0)
@type combine_water: bool
@keyword edists: Include errors for distance estimates of stars here.
(default: [])
@type edists: list[float]
@keyword escaling: Include relative errors of extra scaling parameters
as lists. len is len of scaling, len of an element
is len of sg
(default: [])
@type escaling: list[list]
    @keyword pfn_path: Output folder for diagnostic plots. By default they are
                       stored locally.
(default: '')
@type pfn_path: str
'''
if '1H1H16O' not in molec: combine_water = 0
if len(scaling) != len(escaling):
print 'No errors on the scaling factors taken into account.'
escaling = []
if len(edists) != len(sg):
print 'No errors on the distance taken into account.'
edists = [0]*len(sg)
allints = []
allenergies = []
allerrors = []
allwaves = []
allints_co15 = []
allints_co30 = []
allerrs_co15 = []
allerrs_co30 = []
#-- Select all CO line strengths for two typical transitions: 15-14, 30-29
co = sg[0].getMolecule('12C16O')
#-- No need to define other pars. These only serve as a template.
co1514 = Transition.Transition(molecule=co,telescope='PACS',jup=15,\
jlow=14,path_gastronoom='codeJun2013')
co3029 = Transition.Transition(molecule=co,telescope='PACS',jup=30,\
jlow=29,path_gastronoom='codeJun2013')
trl_co15 = Transition.getTransFromStarGrid(sg,co1514,'sample')
trl_co30 = Transition.getTransFromStarGrid(sg,co3029,'sample')
ls_co15,errs_co15 = Transition.getLineStrengths(trl_co15,'dint')
ls_co30,errs_co30 = Transition.getLineStrengths(trl_co30,'dint')
for istar,(s,intco15,intco30,errco15,errco30) in \
enumerate(zip(sg,ls_co15,ls_co30,errs_co15,errs_co30)):
sn = s['STAR_NAME']
allints.append([])
allenergies.append([])
allerrors.append([])
allwaves.append([])
allints_co15.append([])
allints_co30.append([])
allerrs_co15.append([])
allerrs_co30.append([])
        if combine_water:
            trans = s.getTransitions('1H1H16O') + s.getTransitions('p1H1H16O')
else:
trans = s.getTransitions(molec)
ls_trans,errls_trans = Transition.getLineStrengths(trans,'dint')
for t,lst,elst in zip(trans,ls_trans,errls_trans):
if not np.isnan(lst):
allints_co15[-1].append(abs(lst/intco15))
allints_co30[-1].append(abs(lst/intco30))
allerrs_co15[-1].append(np.sqrt(sum([elst**2]+[errco15**2]))\
*abs(lst/intco15))
allerrs_co30[-1].append(np.sqrt(sum([elst**2]+[errco30**2]))\
*abs(lst/intco30))
tint = abs(lst*s['DISTANCE']**2/100.**2 )
for par in scaling:
tint *= 1./s[par]
                #-- I scales as d**2, so the relative distance error counts
                #   double: 4*e_d**2 in the variance, consistent with the
                #   other routines in this module.
                totalerr = np.sqrt(sum([elst**2]+\
                                       [4*edists[istar]**2]+\
                                       [esca[istar]**2 for esca in escaling]))
allints[-1].append(tint)
allenergies[-1].append(t.getEnergyUpper())
allerrors[-1].append(totalerr*tint)
allwaves[-1].append(t.wavelength*10**4)
else:
for tlist in [allints_co15,allints_co30,allerrs_co15,\
allerrs_co30,allints,allerrors]:
tlist[-1].append(float('nan'))
allenergies[-1].append(float(t.getEnergyUpper()))
allwaves[-1].append(t.wavelength*10**4)
isort = np.argsort(allenergies[-1])
for tlist in [allints_co15,allints_co30,allerrs_co15,allwaves,\
allerrs_co30,allints,allerrors,allenergies]:
tlist[-1] = array(tlist[-1])[isort]
    isort = np.argsort([s['MDOT_GAS'] for s in sg])
    for tlist in [allints_co15,allints_co30,allerrs_co15,allwaves,\
                  allerrs_co30,allints,allerrors,allenergies,sg]:
        #-- Rebinding the loop variable would be a no-op: sort in place.
        tlist[:] = [tlist[i] for i in isort]
linestyles = ['o-','o-','o-','o-','o-','o-','o-',\
'--x','--x','--x','--x','--x','--x','--x',\
'-.s','-.s','-.s','-.s','-.s','-.s','-.s']
colors = ['r','b','k','g','m','y','c']
line_types = [ls + col for ls,col in zip(linestyles,3*colors)]
line_types = line_types[:len(sg)]
xmin = 0
xmax = len(allenergies[0])+1
plot_title = ', '.join(['%i: %.1f cm$^{-1}$ - %.1f $\mu$m'\
%(i+1,t.getEnergyUpper(),t.wavelength*10**4)
for i,t in enumerate(sg[0].getTransitions(molec))])
    if scaling:
        plot_title += ', Extra scaling: ' + ', '.join([sca.replace('_','\_')
                                                       for sca in scaling])
x = []
y = []
yerr = []
y_co15 = []
ye15 = []
y_co30 = []
ye30 = []
for istar in range(len(sg)):
x.append([])
y.append([])
yerr.append([])
y_co15.append([])
y_co30.append([])
ye15.append([])
ye30.append([])
for i,(iint,eint,iint_co15,iint_co30,eint_co15,eint_co30) in \
enumerate(zip(allints[istar],allerrors[istar],\
allints_co15[istar],allints_co30[istar],\
allerrs_co15[istar],allerrs_co30[istar])):
if not np.isnan(iint):
x[-1].append(i+1)
y[-1].append(iint)
yerr[-1].append(eint)
if not np.isnan(iint_co15):
y_co15[-1].append(iint_co15)
ye15[-1].append(eint_co15)
if not np.isnan(iint_co30):
y_co30[-1].append(iint_co30)
ye30[-1].append(eint_co30)
extension = 'pdf'
path = os.path.join(pfn_path,'ints_vs_eul_%s'%molec)
for par in scaling:
if par == scaling[0]:
path += '_extrascaling'
path += '_%s'%par
print Plotting2.plotCols(filename=path,\
x=x,y=y,extension=extension,\
yerr=yerr,\
xmin=xmin,xmax=xmax,plot_title=plot_title,\
yaxis='$I_\mathrm{int}$ (W/m$^2$)',\
xaxis='Index Energy Upper Level',\
line_types=line_types,\
keytags=['%.1e -- %s'%(s['MDOT_GAS'],s['STAR_NAME']) for s in sg],\
key_location=(0.87,0.01),ylogscale=1,\
linewidth=3,fontsize_key=26,fontsize_title=20)
if molec != '12C16O' and not scaling:
path = os.path.join(pfn_path,'ints_co15_vs_eul_%s'%molec)
print Plotting2.plotCols(filename=path,\
x=x,y=y_co15,extension=extension,\
yerr=ye15,\
xmin=xmin,xmax=xmax,plot_title=plot_title,\
yaxis='$I_\mathrm{int}/I_\mathrm{CO 15-14}$ (W/m$^2$)',\
xaxis='Index Energy Upper Level',\
line_types=line_types,\
keytags=['%.1e -- %s'%(s['MDOT_GAS'],s['STAR_NAME']) for s in sg],\
key_location=(0.87,0.01),ylogscale=1,\
linewidth=3,fontsize_key=26,fontsize_title=20)
path = os.path.join(pfn_path,'ints_co30_vs_eul_%s'%molec)
print Plotting2.plotCols(filename=path,\
x=x,y=y_co30,extension=extension,\
yerr=ye30,\
xmin=xmin,xmax=xmax,plot_title=plot_title,\
yaxis='$I_\mathrm{int}/I_\mathrm{CO 30-29}$ (W/m$^2$)',\
xaxis='Index Energy Upper Level',\
line_types=line_types,\
keytags=['%.1e -- %s'%(s['MDOT_GAS'],s['STAR_NAME']) for s in sg],\
key_location=(0.87,0.01),ylogscale=1,\
linewidth=3,fontsize_key=26,fontsize_title=20)
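#-- Hypothetical usage of makeDiagnosticPlot (star_grid and the output path
#   are placeholders; a real call needs Star() objects in which integrated
#   line strengths were matched beforehand):
#
#       makeDiagnosticPlot(sg=star_grid, molec='1H1H16O',
#                          scaling=['MDOT_GAS'], combine_water=1,
#                          pfn_path='/path/to/plots')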
def makeParamPlot(sg,xpar,ypar,expar=[],eypar=[],xratios=[],yratios=[],\
emdot=[],exparlog=0,eyparlog=0,edists=[],mode='dint',\
n_data=0,extra_mpar=[],extra_dpar=[],cfg='',pfn_path='',\
add_linear_fit=0,alf_xmin=None,alf_xmax=None,seds=[],\
deredden=0,**kwargs):
'''
Make a diagnostic plot of either measured line strengths or intrinsic
parameters versus measured line strengths or intrinsic parameters.
Ratios are possible for line strengths. Not for intrinsic parameters.
Requires preparatory work done for the Pacs() and the Star() objects.
@param sg: The stellar models, in which the transitions have been matched
with integrated line strengths. If both models and data are
combined, the data Star() objects are assumed to be listed
first.
@type sg: list[Star()]
@param xpar: The parameter on the x-axis. Can be either a string (Star()
keyword), or an index (of the transition in the first object
in the sg list) for line strengths, or a float giving the
wavelength of the continuum point. When looking at line
strengths in a combo mode (cint or
ctmb) this means it is the index in the transition list of the
data objects rather than the model objects. Transitions in
                data objects other than the first one can have different
                indices.
Note the essential difference between floats and integers!
@type xpar: string/int
@param ypar: The parameter on the y-axis. Can be either a string (Star()
keyword), or an index (of the transition in the first object
in the sg list) for line strengths, or a float giving the
wavelength of the continuum point. When looking at line
                 strengths in a combo mode (cint or
ctmb) this means it is the index in the transition list of the
data objects rather than the model objects. Transitions in
                 objects other than the first one can have different indices.
Note the essential difference between floats and integers!
@type ypar: string/int
@keyword xratios: If xpar is a line strength or a continuum point, multiple
ratios can be requested to be plotted in succession.
Therefore, this gives the indices (if int, refers to the
1st Star() object in sg) or 'mdot' (if ratio wrt Mdot)
or float (in case of a continuum wavelength point) for the
x-axis ratio.
(default: [])
@type xratios: list[int/str]
@keyword yratios: If ypar is a line strength, multiple ratios can be
requested to be plotted in succession.
Therefore, this gives the indices (if int, refers to the
1st Star() object in sg) or 'mdot' (if ratio wrt Mdot)
or float (in case of a continuum wavelength point) for the
y-axis ratio
(default: [])
@type yratios: list[int/str]
@keyword emdot: Include errors for the x/yratio quantity if it is mdot. Not
used for error estimation on mdot as a parameter! The mdot
errors are given in log scale.
(default: [])
@type emdot: list[float]
@keyword expar: The error on the x-parameter if it is a Star() key and if
mode is cint or dint. Number of entries in array is equal
to the number of data Star() objects.
(default: [])
@type expar: array
@keyword eypar: The error on the y-parameter if it is a Star() key and if
mode is cint or dint. Number of entries in array is equal
to the number of data Star() objects.
(default: [])
@type eypar: array
@keyword exparlog: The xpar error is given in logscale. Only relevant for
the d and c modes.
(default: 0)
@type exparlog: bool
@keyword eyparlog: The ypar error is given in logscale. Only relevant for
the d and c modes.
(default: 0)
@type eyparlog: bool
@keyword edists: Include errors for distance estimates of stars here. These
distances are only used to rescale line strengths if they
are not in a ratio.
(default: [])
@type edists: list[float]
@keyword mode: The mode in which line strengths are selected, ie either
from data or from models. Either 'dint', 'mint', 'mtmb' or
'dtmb' values. A combination of both is possible by setting
this key to 'cint' or 'ctmb'. Then the extra keyword
'n_data' is required, which indicates how many Star()
objects are associated with data. The method assumes they
are the first objects in the list of Star() objects. In case
only continuum wavelengths are requested (a combination is
possible!), only the first letter really matters.
(default: 'dint')
@type mode: str
@keyword n_data: The number of data Star() objects, assuming they are the
                     first in the star_grid. Only required if mode is 'cint'
                     or 'ctmb'.
This number, if given, must be equal to the number of seds,
if given.
(default: 0)
@type n_data: int
@keyword extra_mpar: If extra conditional parameters are requested for
models, the
plot is colour coded based on them. For instance,
Star() object keywords can serve as conditionals.
Note that these are only applied when mode == mtmb,
mint, cint or ctmb.
(default: [])
@type extra_mpar: list[string]
@keyword extra_dpar: If extra conditional parameters are requested for data,
the plot
is colour coded based on them. For instance, Star()
object keywords can serve as conditionals. Note that
these are only applied when mode == dtmb, dint, cint
or ctmb.
(default: [])
@type extra_dpar: list[string]
@keyword seds: The SEDs of the data objects. Only used when xpar or ypar is
a float (and thus continuum points are required).
The number of SEDs given must
be equal to n_data, or the number of Star() objects if
mode[0] == 'd'. An error is thrown otherwise.
(default: [])
@type seds: list[Sed()]
@keyword deredden: Deredden the SEDs before plotting, in case of continuum
flux points. This is never done in case only one data
object is given and reddening is requested in models to
avoid double correction.
(default: 0)
@type deredden: bool
@keyword cfg: config filename read as a dictionary, can replace any keyword
given to plotCols. Can also be a dictionary itself, in which
case no file is read and the kwargs are updated with the
content of cfg
(default: '')
@type cfg: string/dict
@keyword add_linear_fit: Add a linear fit to the figures. The fit is done
through corrSG method, of which extra arguments
can be given in kwargs. (xpar_co, ypar_co)
Only works in dint or cint mode if xratios or
yratios has len less than 2.
(default: 0)
@type add_linear_fit: bool
    @keyword pfn_path: Output folder for diagnostic plots. By default they are
                       stored locally.
(default: '')
@type pfn_path: str
@keyword alf_xmin: The minimum x value for the linear fit plot, if
requested. (This is not the cut off value for the
fitting routine itself!) Has to be given if a linear fit
is requested.
(default: None)
@type alf_xmin: float
@keyword alf_xmax: The maximum x value for the linear fit plot, if
requested. (This is not the cut off value for the
fitting routine itself!) Has to be given if a linear fit
is requested.
(default: None)
@type alf_xmax: float
@keyword **kwargs: extra keywords needed for the linear fit, if requested.
@type **kwargs: dict
@return: The filename of the produced plot is returned.
@rtype: str
'''
x_titles = dict([('MDOT_GAS',r'$\log$ $\left[\dot{M}_\mathrm{g}\ (\mathrm{M}_\odot/\mathrm{yr})\right]$'),\
('MDOT_DUST',r'$\log$ $\left[\dot{M}_\mathrm{d}\ (\mathrm{M}_\odot/\mathrm{yr})\right]$'),\
('VEL_INFINITY_GAS',r'$v_{\infty\mathrm{,g}}$ ($\mathrm{km} \mathrm{s}^{-1}$)'),\
('SHELLMASS',r'$\log$ $\left[\dot{M}_\mathrm{g}/v_{\infty\mathrm{,g}}\ (\mathrm{M}_\odot\ \mathrm{yr}^{-1}\ \mathrm{km}^{-1}\ \mathrm{s})\right]$'),\
('SHELLDENS',r'$\log$ $\left[\bar{\rho}\ (\mathrm{g}\ \mathrm{cm}^{-3})\right]$'),\
('SHELLCOLDENS',r'$\log$ $\left[\bar{m}\ (\mathrm{g}\ \mathrm{cm}^{-2})\right]$'),\
('SHELLDENS2',r'$\sqrt{\bar{\rho}^2 R_\star}$ (g/cm$^{5/2}$)'),\
('L_STAR','$L_\star$ (L$_\odot$)'),\
('P_STAR',r'$\log$ $\left[P\ (\mathrm{days})\right]$'),\
('T_STAR','$T_\star$ (K)'),\
                     ('R_STAR','$R_\star$ (R$_\odot$)'),\
('Q_STAR','$Q_\star$ (days)'),\
('R_INNER_GAS','$R_\mathrm{i,g}$ (R$_\star$)'),\
('AH2O_RATE',r'$\log$ $\left[A_{\mathrm{H}_2\mathrm{O}}/A_{\mathrm{H}_2} \times \dot{M}_\mathrm{g}\ (\mathrm{M}_\odot/\mathrm{yr})\right]$'),\
('F_H2O',r'$\log$ $\left[A_{\mathrm{H}_2\mathrm{O}}/A_{\mathrm{H}_2}\right]$'),\
])
pfn_parts = dict([('MDOT_GAS','mg'),\
('MDOT_DUST',r'md'),\
('STARTYPE','startype'),\
('A_SICB','asicb'),\
('A_AMCSPH','aamcsph'),\
('VEL_INFINITY_GAS','vg'),\
('SHELLMASS','shellmass'),\
('SHELLDENS','dens'),\
('SHELLCOLDENS','coldens'),\
('SHELLDENS2','dens3-2'),\
('L_STAR','lstar'),\
('R_STAR','rstar'),\
('P_STAR','period'),\
('Q_STAR','qstar'),\
('T_STAR','tstar'),\
('P_TYPE','ptype'),\
('F_H2O','ah2o'),\
('DUST_TO_GAS_CHANGE_ML_SP','d2g'),\
('TEMPERATURE_EPSILON_GAS','eps1'),\
('TEMPERATURE_EPSILON2_GAS','eps2'),\
('TEMPERATURE_EPSILON3_GAS','eps3'),\
('RADIUS_EPSILON2_GAS','rt12'),\
('RADIUS_EPSILON3_GAS','rt23'),\
('R_INNER_GAS','rig'),\
('MDOT_CLASS','mdotgrad'),\
('SCD_CLASS','scdgrad'),\
('SHELLMASS_CLASS','shellmassclass'),\
('ABUN_O','abuno'),\
('L_CLASS','lclass'),\
('T_CLASS','tclass'),\
('VG_CLASS','vgclass'),\
('AH2O_RATE','ah2orate'),\
('F_CONT_TYPE','fconttype'),\
('DRIFT_TYPE','drifttype'),\
('ENHANCE_ABUNDANCE_FACTOR_H2O','h2oabunfac'),\
('ABUNDANCE_FILENAME_H2O','h2oabunfile')])
keynames = dict([('MDOT_GAS','$\dot{M}_\mathrm{g}$'),\
('MDOT_DUST',r'$\dot{M}_\mathrm{d}$'),\
('A_SICB','A(SICB)'),\
('STARTYPE','StarType'),\
('A_AMCSPH','A(AMCSPH)'),\
('VEL_INFINITY_GAS',r'$v_{\infty\mathrm{,g}}$'),\
('SHELLMASS',r'$\dot{M}_\mathrm{g}/v_{\infty\mathrm{,g}}$'),\
('SHELLDENS',r'$\bar{\rho}$'),\
('SHELLCOLDENS',r'$\bar{m}$'),\
                     ('SHELLDENS2',r'$\sqrt{\bar{\rho}^2 R_\star}$'),\
('L_STAR','$L_\star$'),\
('P_STAR','$P$'),\
('Q_STAR','$Q_\star$'),\
('T_STAR','$T_\star$'),\
('P_TYPE','Var.~Type'),\
('F_H2O','$n_{\mathrm{H}_2\mathrm{O}}/n_{\mathrm{H}_2}$'),\
('DUST_TO_GAS_CHANGE_ML_SP','$\psi$'),\
('TEMPERATURE_EPSILON_GAS',r'$\epsilon$'),\
('TEMPERATURE_EPSILON2_GAS',r'$\epsilon_2$'),\
('TEMPERATURE_EPSILON3_GAS',r'$\epsilon_3$'),\
('RADIUS_EPSILON2_GAS','$R_\mathrm{T, 12}$'),\
('RADIUS_EPSILON3_GAS','$R_\mathrm{T, 23}$'),\
('R_INNER_GAS','$R_\mathrm{i,g}$'),\
('MDOT_CLASS',''),\
('SCD_CLASS',''),\
('SHELLMASS_CLASS',''),\
('ABUN_O','$n_{\mathrm{O}}/n_{\mathrm{H}_\mathrm{tot}}$'),\
('L_CLASS',''),\
('T_CLASS',''),\
('VG_CLASS',''),\
('DRIFT_TYPE',''),\
('AH2O_RATE',r'$A_{\mathrm{H}_2\mathrm{O}}/A_{\mathrm{H}_2} \times \dot{M}_\mathrm{g}$'),\
('F_CONT_TYPE','Type F$_\mathrm{cont}$'),\
('ENHANCE_ABUNDANCE_FACTOR_H2O','h2oAbunFac'),\
('ABUNDANCE_FILENAME_H2O','h2oAbunFile')])
keyunits = dict([('MDOT_GAS','$\mathrm{M}_\odot\ \mathrm{yr}^{-1}$'),\
('MDOT_DUST','$\mathrm{M}_\odot\ \mathrm{yr}^{-1}$'),\
('STARTYPE',''),\
('A_SICB',''),\
('A_AMCSPH',''),\
('VEL_INFINITY_GAS','$\mathrm{km\;s}^{-1}$'),\
('SHELLMASS','$\mathrm{M}_\odot\ \mathrm{yr}^{-1}\ \mathrm{km}^{-1}\ \mathrm{s}$'),\
('SHELLDENS','$\mathrm{g\;cm}^{-3}$'),\
('SHELLCOLDENS','$\mathrm{g\;cm}^{-2}$'),\
('SHELLDENS2','$\mathrm{g\;cm}^{5/2}$'),\
('L_STAR','$\mathrm{L}_\odot$'),\
('P_STAR','$\mathrm{days}$'),\
('Q_STAR','$\mathrm{days}$'),\
('T_STAR','$\mathrm{K}$'),\
('P_TYPE',''),\
('F_H2O',''),\
('DUST_TO_GAS_CHANGE_ML_SP',''),\
('TEMPERATURE_EPSILON_GAS',''),\
('TEMPERATURE_EPSILON2_GAS',''),\
('TEMPERATURE_EPSILON3_GAS',''),\
('RADIUS_EPSILON2_GAS','$\mathrm{R}_\star$'),\
('RADIUS_EPSILON3_GAS','$\mathrm{R}_\star$'),\
('R_INNER_GAS','$\mathrm{R}_\star$'),\
('MDOT_CLASS',''),\
('SCD_CLASS',''),\
('SHELLMASS_CLASS',''),\
('ABUN_O',''),\
('T_CLASS',''),\
('VG_CLASS',''),\
('DRIFT_TYPE','Drift'),\
('L_CLASS',''),\
('AH2O_RATE','$\mathrm{M}_\odot\ \mathrm{yr}^{-1}$'),\
('F_CONT_TYPE',''),\
('ENHANCE_ABUNDANCE_FACTOR_H2O',''),\
('ABUNDANCE_FILENAME_H2O','')])
makeints = dict([('MDOT_GAS',0),\
('MDOT_DUST',0),\
('STARTYPE',0),\
('A_SICB',0),\
('A_AMCSPH',0),\
('VEL_INFINITY_GAS',1),\
('SHELLMASS',0),\
('SHELLDENS',0),\
('SHELLCOLDENS',0),\
('SHELLDENS2',0),\
('L_STAR',1),\
('P_STAR',1),\
('Q_STAR',0),\
('T_STAR',1),\
('P_TYPE',0),\
('F_H2O',0),\
('DUST_TO_GAS_CHANGE_ML_SP',0),\
('TEMPERATURE_EPSILON_GAS',0),\
('TEMPERATURE_EPSILON2_GAS',0),\
('TEMPERATURE_EPSILON3_GAS',0),\
('RADIUS_EPSILON2_GAS',0),\
('RADIUS_EPSILON3_GAS',0),\
('R_INNER_GAS',0),\
('MDOT_CLASS',0),\
('SCD_CLASS',0),\
('SHELLMASS_CLASS',0),\
('ABUN_O',0),\
('L_CLASS',0),\
('T_CLASS',0),\
('VG_CLASS',0),\
('DRIFT_TYPE',0),\
('AH2O_RATE',0),\
('F_CONT_TYPE',0),\
('ENHANCE_ABUNDANCE_FACTOR_H2O',0),\
('ABUNDANCE_FILENAME_H2O',0)])
for k in extra_mpar+extra_dpar:
if k not in pfn_parts.keys():
pfn_parts[k] = k.lower().replace('_','')
keynames[k] = k.replace('_','\_')
keyunits[k] = ''
makeints[k] = 0
edists,emdot = array(edists), array(emdot)
n_data = int(n_data)
expar, eypar = array(expar), array(eypar)
if isinstance(extra_mpar,str):
extra_mpar = [extra_mpar]
if isinstance(extra_dpar,str):
extra_dpar = [extra_dpar]
#-- If the x or y parameter is a string, it's a keyword and a ratio is not
# allowed for now.
if isinstance(xpar,str):
xratios = []
if isinstance(ypar,str):
yratios = []
ratios = [xratios,yratios]
sg_dists = array([s['DISTANCE'] for s in sg])
sg_mdot = array([s['MDOT_GAS'] for s in sg])
#-- Collect x and y information to simplify coding later on, as all data
# collection and error handling is the same for x and y.
pars = [xpar,ypar]
epars = [expar,eypar]
eparlogs = [exparlog,eyparlog]
#-- If continuum points are requested, disallow linear fits for now
# Check if enough SEDs are provided given the number of data objects, but
# only in case continuum points are requested.
if isinstance(xpar,float) or isinstance(ypar,float) \
or True in [isinstance(i,float) for i in xratios+yratios]:
add_linear_fit = 0
if mode[0] == 'd' and not seds:
raise IOError('No SEDs given for data objects.')
elif mode[0] == 'c' and n_data != len(seds):
raise IOError('Number of SEDs not equal to number of data objects')
#-- Remember number of data objects versus model objects, also in terms of
# extra conditional parameters for plotting purposes.
if mode[0] == 'm':
#-- In model mode, no errors can be given for any of the parameters.
add_linear_fit = 0
expar = array([])
eypar = array([])
n_data = 0
seds = []
extra_dpar = []
extra_par = extra_mpar
current_par = 'm'
elif mode[0] == 'd':
n_data = len(sg)
extra_mpar = []
if mode[0] != 'm':
extra_par = extra_dpar
current_par = 'd'
for istar,s in enumerate(sg):
if n_data and istar == n_data:
extra_par = extra_mpar
current_par = 'm'
s['EC'] = (current_par,\
tuple([s[par]
for par in extra_par
if not s[par] is None]))
ecl = sorted(list(set([s['EC'] for s in sg])))
ecl_num = []
for ec in ecl:
isg = array([s['EC'] == ec for s in sg])
isgd = isg[:n_data]
isgm = isg[n_data:]
nsgd = len(isg[:n_data][isgd])
nsgm = len(isg[n_data:][isgm])
ecl_num.append((ec,isg,isgd,isgm,nsgd,nsgm))
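    #-- Each ecl_num entry thus bundles, per extra-parameter class: the class
    #   id, a boolean mask over all stars, that mask restricted to the data
    #   and model parts, and the number of data and model stars it contains.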
if len(xratios) > 1 or len(yratios) > 1:
#-- Linear fits can only be added if only one ratio is requested.
add_linear_fit = 0
if add_linear_fit:
add_linear_fit = dict()
xratio, yratio = None, None
if xratios:
xratio = xratios[0]
if yratios:
yratio = yratios[0]
        #-- The ratio errors are assumed to be passed via kwargs; pop them so
        #   they do not reach corrSG twice.
        exratio = kwargs.pop('exratio',[])
        eyratio = kwargs.pop('eyratio',[])
        results = corrSG(sg=sg[:n_data],xpar=xpar,ypar=ypar,expar=expar,\
                         eypar=eypar,xratio=xratio,yratio=yratio,edist=edists,\
                         show=0,eyratio=eyratio,exratio=exratio,**kwargs)
fitcoef = results['results']
xgrid = []
ygrid = []
this_x = array([alf_xmin,alf_xmax])
add_linear_fit['xmean'] = this_x
add_linear_fit['ymean'] = results['intercept']+results['slope']*this_x
for n in range(0, fitcoef.shape[0],4):
this_y = fitcoef[n,1] + fitcoef[n,0] * this_x
xgrid.append(this_x)
ygrid.append(this_y)
add_linear_fit['xgrid'] = xgrid
add_linear_fit['ygrid'] = ygrid
#-- Check if 'mdot' is requested. Split these up in y and x mdot, because
# the error estimate on the ratio depends on the x or y line strength.
# This is not the case for cont flux because the error estimate is simpler
# there, hence either y or x continuum ratios can remain the same.
if 'mdot' in xratios:
xratios[xratios.index('mdot')] = 'xmdot'
if 'mdot' in yratios:
yratios[yratios.index('mdot')] = 'ymdot'
#-- Select all line strengths/continuum fluxes and errors for the ratios.
# These dicts hold the info for both x and y.
ls_ratios = dict()
els_ratios = dict()
for i in set(yratios+xratios):
#-- mdot must be done separately, due to the cumbersome error estimate
if i == 'xmdot' or i == 'ymdot': continue
elif isinstance(i,float):
#-- getCFlux Converts to W/m2/Hz for unit consistency later on
# (LS/fcont is in Hz, fcont/LS is in Hz^-1)
dists = [s['DISTANCE'] for s in sg[:n_data]] if deredden else []
cflux,eflux = Sed.getCFlux(wav=i,seds=seds,star_grid=sg[n_data:],
deredden=dists)
ls_ratios[i] = cflux
els_ratios[i] = eflux
else:
ratsample = sg[0]['GAS_LINES'][i]
rattrans = Transition.getTransFromStarGrid(sg,ratsample,'sample')
ls,els = Transition.getLineStrengths(rattrans,mode,n_data=n_data)
ls_ratios[i] = ls
els_ratios[i] = els
#-- Set the dictionaries for x and y that will hold all to be plotted data
# No extra keys are added if x/yratios is empty. (xratios empty by
# default if xpar/ypar is a string).
x, xerr, xblend = dict([('x',[])]), dict([('x',[])]), dict([('x',[])])
for k in xratios:
x[k], xerr[k], xblend[k] = [], [], []
y, yerr, yblend = dict([('y',[])]), dict([('y',[])]), dict([('y',[])])
for k in yratios:
y[k], yerr[k], yblend[k] = [], [], []
blends = [xblend,yblend]
xy = [x,y]
errs = [xerr,yerr]
axes = ['x','y']
#-- Select the x/y parameters, making a difference between a Star() key or
# floats (continuum points) or line strengths. Star keys are never used
# in ratios, except MDOT_GAS but that is handled separately anyway.
# In case of line strengths or floats, the blends/xy/errs/axes dicts are
# filled later. In case of Star() keys, sample/seltrans/allint/allerr
# are not used. sample/seltrans is also not used by continuum points, and
# add None.
sample, seltrans, allint, allerr = [] , [], [], []
for par,epar,eparlog,blend,xyi,err,axisstr in zip(pars,epars,eparlogs,\
blends,xy,errs,axes):
if isinstance(par,str):
sg_par = array([s[par] for s in sg])
sample.append(None)
seltrans.append(None)
allint.append(None)
allerr.append(None)
for (ec,isg,isgd,isgm,nsgd,nsgm) in ecl_num:
#-- No blends possible for parameters, so add False for all.
blend[axisstr].append(np.zeros(nsgd) != 0)
xyi[axisstr].append(np.log10(sg_par[isg]))
#-- Check if errors are given (in d or c mode) and check for log
if epar.size and eparlog:
err[axisstr].append(np.concatenate([epar[isgd],np.zeros(nsgm)]))
#-- Check if upper and lower error bars are the same (ie 1d)
elif epar.size and len(epar.shape) == 1:
ll = np.concatenate([-np.log10(1-epar[isgd]),np.zeros(nsgm)])
ul = np.concatenate([np.log10(1+epar[isgd]),np.zeros(nsgm)])
err[axisstr].append([ll,ul])
#-- maybe two separate arrays are given for upper and lower
elif epar.size and epar.shape[0] == 2:
ll = np.concatenate([-np.log10(1-epar[0][isgd]),np.zeros(nsgm)])
ul = np.concatenate([np.log10(1+epar[1][isgd]),np.zeros(nsgm)])
err[axisstr].append([ll,ul])
elif isinstance(par,float):
#-- Extract the continuum fluxes and their errors at the requested
# wavelength
dists = [d for d in sg_dists[:n_data]] if deredden else []
all_iflux, all_ieflux = Sed.getCFlux(wav=par,seds=seds,\
star_grid=sg[n_data:],\
deredden=dists)
sample.append(None)
seltrans.append(None)
allint.append(all_iflux)
allerr.append(all_ieflux)
else:
#-- Select the line strengths and errors of the main transition for
# both axes. This information is only used when par is not a str
# or a float. Later, when ratios are set, this also is only done
# if par is not a str or a float. So no mix-ups can happen. The
# sample and selection transitions are remembered for later.
isample = sg[0]['GAS_LINES'][par]
iseltrans = Transition.getTransFromStarGrid(sg,isample,'sample')
iallint,iallerr = Transition.getLineStrengths(iseltrans,mode,\
n_data=n_data)
sample.append(isample)
seltrans.append(iseltrans)
allint.append(iallint)
allerr.append(iallerr)
#-- If an Mdot ratio is requested, set the second component of the ratio
# and the errors here. (Takes a bit of calc time to estimate errors for
# these ratios)
#-- Works for both line strengths and continuum fluxes in the same way.
for iratios,iallint,iallerr,axisstr in zip(ratios,allint,allerr,axes):
if axisstr+'mdot' in iratios:
line1 = abs(iallint[:n_data])*sg_dists[:n_data]**2/100.**2
line1_err = line1*np.sqrt(iallerr[:n_data]**2+4*edists**2)
line2 = np.log10(sg_mdot[:n_data])
line2_err = emdot
mratios = guessRatio(line1,line1_err,line2,line2_err,\
line2_log=1,positive=1,n_fit=10000)
emrat = array([np.std(mratios[:,istar])/np.mean(mratios[:,istar])
for istar in range(len(line1))])
ls_ratios[axisstr+'mdot'] = sg_mdot
els_ratios[axisstr+'mdot'] = emrat
#-- Complete the x and y data dicts.
# And make sure to remember which stars (in the datagrid) go where.
for (ec,isg,isgd,isgm,nsgd,nsgm) in ecl_num:
#-- Set the main line strength for the x/yaxis if par is not a string
for par,blend,xyi,err,axisstr,iallint,iallerr,iratios \
in zip(pars,blends,xy,errs,axes,allint,allerr,ratios):
if not isinstance(par,str):
#-- continuum points always positive, so never a blend.
blend[axisstr].append(iallint[isgd] < 0)
#-- This irat1 is used later as well but only in this for loop
irat1 = abs(iallint[isg])
xyi[axisstr].append(np.log10(irat1*sg_dists[isg]**2/100.**2))
#-- Set the errors in case data are involved.
# Note that the error bars in log scale can be calculated without
# knowing the real value, since it is a constant added to the real
# value by matplotlib. Unfortunately, going to log space, means the
# upper and lower limits will differ so two lists are needed. Add a
# minus sign to the lower limit, as matplotlib will subtract the ll.
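            #   Concretely, for a relative error e on a positive value v:
            #       log10(v*(1+e)) - log10(v) =  log10(1+e)   (upper bar)
            #       log10(v) - log10(v*(1-e)) = -log10(1-e)   (lower bar)
            #   so the bars depend on e only, not on v itself.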
if mode[0] != 'm':
etot = np.sqrt(iallerr[isgd]**2+4*edists[isgd]**2)
ll = np.concatenate([-np.log10(1-etot),np.zeros(nsgm)])
ul = np.concatenate([np.log10(1+etot),np.zeros(nsgm)])
err[axisstr].append([ll,ul])
for k in iratios:
if k == axisstr+'mdot':
#-- Just append the bool array for the 1st component LS
blend[k].append(blend[axisstr][-1])
xyi[k].append(xyi[axisstr][-1]-np.log10(ls_ratios[k][isg]))
if mode[0] != 'm':
etot = els_ratios[k][isgd]
else:
#-- Both continuum flux points are positive. This will never
# evaluate to True when continuum is considered.
blend[k].append((blend[axisstr][-1])+(ls_ratios[k][isgd]<0))
xyi[k].append(np.log10(irat1/abs(ls_ratios[k][isg])))
if mode[0] != 'm':
etot = np.sqrt(iallerr[isgd]**2+els_ratios[k][isgd]**2)
if mode[0] != 'm':
ll = np.concatenate([-np.log10(1-etot),np.zeros(nsgm)])
ul = np.concatenate([np.log10(1+etot),np.zeros(nsgm)])
err[k].append([ll,ul])
#-- Set a number of parameters for the figures.
cfg_dict = Plotting2.readCfg(cfg)
    if 'pfn_path' in cfg_dict:
pfn_path = cfg_dict['pfn_path']
extra_ppars = dict()
#-- Set the title, depending on if LS are requested vs pars.
pt = ''
if isinstance(ypar,int):
pt += '%s: E$_\mathrm{ul,y}$ = %.1f - %.2f'\
%(str(sample[1]),sample[1].getEnergyUpper(),\
sample[1].wavelength*10**4)
if isinstance(xpar,int):
        pt += ' VS %s: E$_\mathrm{ul,x}$ = %.1f - %.2f'\
%(str(sample[0]),sample[0].getEnergyUpper(),\
sample[0].wavelength*10**4)
extra_ppars['plot_title'] = pt
extra_ppars['fontsize_title'] = 20
extra_ppars['figsize'] = (8*np.sqrt(2),8)
extra_ppars['extension'] = '.pdf'
extra_ppars['fontsize_key'] = 14
extra_ppars['linewidth'] = 2
#-- Set the keytags and linestyles based on if data or models are
# plotted.
if not ecl in [[('m',()),('d',())],[('m',())],[('d',())]]:
keytags = []
for curr_par,ec in ecl:
this_par = curr_par == 'm' and extra_mpar or extra_dpar
k = []
for par,v in zip(this_par,ec):
if 'CLASS' in par:
kstr = v[1]
elif par == 'P_TYPE':
kstr = '$\mathrm{%s}$'%v
elif par == 'SHELLCOLDENS':
kstr = '%s = $%.2f$ %s'%(keynames[par],v,keyunits[par])
else:
kstr = '%s = $%s$ %s'\
%(keynames[par],\
makeints[par] and str(int(v)) or str(v),\
keyunits[par])
k.append(kstr)
keytags.append(', '.join(k))
extra_ppars['key_location'] = 'best'
mlinestyles = ['-x','-x','-x','-x','-x','-x','-x',\
'--s','--s','--s','--s','--s','--s','--s',\
'-.+','-.+','-.+','-.+','-.+','-.+','-.+',\
'--p','--p','--p','--p','--p','--p','--p',\
'o-','o-','o-','o-','o-','o-','o-']
dlinestyles = ['o','o','o','o','o','o','o',\
'x','x','x','x','x','x','x',\
's','s','s','s','s','s','s']
colors = ['r','b','g','k','m','y','c']
dline_types = [ls + col for ls,col in zip(dlinestyles,3*colors)]
colors.reverse()
mline_types = [ls + col for ls,col in zip(mlinestyles,5*colors)]
if mode[0] == 'm':
line_types = mline_types[:len(ecl)]
zorder = range(len(ecl))
elif mode[0] == 'd':
line_types = dline_types[:len(ecl)]
zorder = range(len(ecl))
else:
d_ecl = len([ec for ec in ecl if ec[0] == 'd'])
m_ecl = len([ec for ec in ecl if ec[0] == 'm'])
line_types = dline_types[:d_ecl] \
+ mline_types[:m_ecl]
zorder = range(10,10+d_ecl) + range(-m_ecl,0)
markersize = [6]*len(keytags)
pfn_ecl = '_'+'_'.join([pfn_parts[ec] for ec in extra_dpar+extra_mpar])
if pfn_ecl == '_': pfn_ecl = ''
    #-- Avoid overhead: if ratios are requested, you generally don't want the
    #   separate line strengths outside a ratio.
if xratios:
del x['x']
if yratios:
del y['y']
#-- Loop over the X-AXIS KEYS
for xk in x.keys():
#-- extract the ratio transition if applicable
if isinstance(xk,int):
xratsample = sg[0]['GAS_LINES'][xk]
#-- Change the xaxis name/min/max based on each plot
# ie if no errors are given, just take min and max and scale.
#-- if errors are given: full plot.
if not xerr[xk]:
extra_ppars['xmin'] = min([min(xi) for xi in x[xk]])-0.2
extra_ppars['xmax'] = max([max(xi) for xi in x[xk]])+0.2
#-- Set the x-axis title, xmin, xmax and pfn_xtag.
if isinstance(xpar,str):
extra_ppars['xaxis'] = x_titles[xpar]
pfn_xtag = pfn_parts[xpar]
pfn_xrat = ''
elif isinstance(xpar,float):
s1 = r'F_\mathrm{%.1f\ \mu m}'%xpar
if xk == 'xmdot':
extra_ppars['xaxis'] = r'$\log$ $\left[%s/\dot{M}_\mathrm{'%s1+\
r'g}\ (\mathrm{W}/\mathrm{m}^2/\mathrm{Hz}\ '+\
r'\mathrm{yr}/\mathrm{M}_\odot)\right]$'
elif xk == 'x':
extra_ppars['xaxis'] = r'$\log$ $\left[%s\ (\mathrm{W}/\mathrm{m}^2/\mathrm{Hz})\right]$'%s1
elif isinstance(xk,int):
s2 = xratsample.makeAxisLabel()
extra_ppars['xaxis'] = r'$\log$ $\left[%s/%s\ (\mathrm{Hz}^{-1})\right]$'%(s1,s2)
else:
s2 = r'F_\mathrm{%.1f\ \mu m}'%xk
extra_ppars['xaxis'] = r'$\log$ $\left[%s/%s\right]$'%(s1,s2)
pfn_xtag = 'f%.1fmic'%xpar
if xk =='xmdot':
pfn_xrat = '_mdot'
elif xk == 'x':
pfn_xrat = ''
elif isinstance(xk,int):
            ms = xratsample.molecule.molecule_short
pfn_xrat = '_%s%i'%(ms,xk)
else:
pfn_xrat = '_f%.1fmic'%xk
else:
#-- Adapt the xaxis title based on the xratios.
s1 = sample[0].makeAxisLabel()
if xk == 'xmdot':
extra_ppars['xaxis'] = r'$\log$ $\left[%s/\dot{M}_\mathrm{'%s1+\
r'g}\ (\mathrm{W}/\mathrm{m}^2\ \mathrm{yr}/\mathrm{M}_\odot)\right]$'
elif xk == 'x':
extra_ppars['xaxis'] = r'$\log$ $\left[%s\ (\mathrm{W}/\mathrm{m}^2)\right]$'%s1
elif isinstance(xk,float):
s2 = r'F_\mathrm{%.1f\ \mu m}'%xk
extra_ppars['xaxis'] = r'$\log$ $\left[%s/%s'%(s1,s2)+\
r'\ (\mathrm{Hz})\right]$'
else:
iml = sample[0].molecule.molecule != xratsample.molecule.molecule
s1 = sample[0].makeAxisLabel(iml)
s2 = xratsample.makeAxisLabel(iml)
extra_ppars['xaxis'] = r'$\log$ $\left[%s/%s\right]$'%(s1,s2)
pfn_xtag = '%s_eul_%i_wl_%.1f'\
%(sample[0].molecule.molecule,\
int(sample[0].getEnergyUpper()),\
float(sample[0].wavelength*10**4))
if xk =='xmdot':
pfn_xrat = '_mdot'
elif xk == 'x':
pfn_xrat = ''
elif isinstance(xk,float):
pfn_xrat = '_f%.1fmic'%xk
else:
ms = xratsample.molecule.molecule_short
pfn_xrat = '_%s%i'%(ms,xk)
#-- Loop over the Y-AXIS KEYS
for yk in y.keys():
if isinstance(yk,int):
yratsample = sg[0]['GAS_LINES'][yk]
#-- Change the yaxis name/min/max based on each plot
# ie if no errors are given, just take min and max and scale.
        #-- If errors are given: full plot.
if not yerr[yk]:
extra_ppars['ymin'] = min([min(yi) for yi in y[yk]])-0.2
extra_ppars['ymax'] = max([max(yi) for yi in y[yk]])+0.2
if isinstance(ypar,str):
extra_ppars['yaxis'] = x_titles[ypar]
pfn_ytag = pfn_parts[ypar]
pfn_yrat = ''
elif isinstance(ypar,float):
s1 = r'F_\mathrm{%.1f\ \mu m}'%ypar
if yk == 'ymdot':
extra_ppars['yaxis'] = r'$\log$ $\left[%s/\dot{M}_\mathrm{'%s1+\
r'g}\ (\mathrm{W}/\mathrm{m}^2/\mathrm{Hz}\ '+\
r'\mathrm{yr}/\mathrm{M}_\odot)\right]$'
elif yk == 'y':
extra_ppars['yaxis'] = r'$\log$ $\left[%s\ (\mathrm{W}/\mathrm{m}^2/\mathrm{Hz})\right]$'%s1
elif isinstance(yk,int):
s2 = yratsample.makeAxisLabel()
extra_ppars['yaxis'] = r'$\log$ $\left[%s/%s\ (\mathrm{Hz}^{-1})\right]$'%(s1,s2)
else:
s2 = r'F_\mathrm{%.1f\ \mu m}'%yk
extra_ppars['yaxis'] = r'$\log$ $\left[%s/%s\right]$'%(s1,s2)
pfn_ytag = 'f%.1fmic'%ypar
if yk =='ymdot':
pfn_yrat = '_mdot'
elif yk == 'y':
pfn_yrat = ''
elif isinstance(yk,int):
ms = yratsample.molecule.molecule_short
pfn_yrat = '_%s%i'%(ms,yk)
else:
pfn_yrat = '_f%.1fmic'%yk
else:
s1 = sample[1].makeAxisLabel()
if yk == 'ymdot':
extra_ppars['yaxis'] = r'$\log$ $\left[%s/\dot{M}_\mathrm{'%s1+\
r'g}\ (\mathrm{W}/\mathrm{m}^2\ \mathrm{yr}/\mathrm{M}_\odot)\right]$'
elif yk == 'y':
extra_ppars['yaxis'] = r'$\log$ $\left[%s\ (\mathrm{W}/\mathrm{m}^2)\right]$'%s1
elif isinstance(yk,float):
s2 = r'F_\mathrm{%.1f\ \mu m}'%yk
extra_ppars['yaxis'] = r'$\log$ $\left[%s/%s'%(s1,s2)+\
r'\ (\mathrm{Hz})\right]$'
else:
iml = sample[1].molecule.molecule != yratsample.molecule.molecule
s1 = sample[1].makeAxisLabel(iml)
s2 = yratsample.makeAxisLabel(iml)
extra_ppars['yaxis'] = r'$\log$ $\left[%s/%s\right]$'%(s1,s2)
pfn_ytag = '%s_eul_%i_wl_%.1f'\
%(sample[1].molecule.molecule,\
int(sample[1].getEnergyUpper()),\
float(sample[1].wavelength*10**4))
if yk =='ymdot':
pfn_yrat = '_mdot'
elif yk == 'y':
pfn_yrat = ''
elif isinstance(yk,float):
pfn_yrat = '_f%.1fmic'%yk
else:
ms = yratsample.molecule.molecule_short
pfn_yrat = '_%s%i'%(ms,yk)
#-- Make an extra list of blends.
xb, yb = [[]], [[]]
for blend1,blend2,xi,yi in zip(xblend[xk],yblend[yk],x[xk],y[yk]):
blended = blend1 + blend2
xb[-1].extend(xi[blended])
yb[-1].extend(yi[blended])
if xb[-1]:
extra_ppars['keytags'] = keytags + ['$\mathrm{Blended}$']
extra_ppars['line_types'] = line_types + ['xk']
extra_ppars['markersize'] = markersize + [14]
extra_ppars['zorder'] = zorder + [max(zorder)+1]
else:
xb, yb, = [], []
extra_ppars['keytags'] = keytags
extra_ppars['line_types'] = line_types
extra_ppars['markersize'] = markersize
extra_ppars['zorder'] = zorder
if add_linear_fit:
extra_ppars['keytags'] = extra_ppars['keytags'] + ['Mean Linear fit']
extra_ppars['line_types'] = extra_ppars['line_types'] + ['-g'] + ['-k']*len(add_linear_fit['xgrid'])
extra_ppars['markersize'] = extra_ppars['markersize'] + [4] + [4]*len(add_linear_fit['xgrid'])
extra_ppars['zorder'] = extra_ppars['zorder'] + [min(zorder)-1] + [min(zorder)-2]*len(add_linear_fit['xgrid'])
extra_ppars['alpha'] = [1]*len(x[xk])+[1]*len(xb)+[1]+[0.002]*len(add_linear_fit['xgrid'])
xb.append(add_linear_fit['xmean'])
yb.append(add_linear_fit['ymean'])
xb.extend(add_linear_fit['xgrid'])
yb.extend(add_linear_fit['ygrid'])
#-- Update from cfg file, in case any setting (except plotfile) has
# to be overridden.
extra_ppars.update(cfg_dict)
pfn = os.path.join(pfn_path,'%s_%s%s_vs_%s%s%s'\
%(mode,pfn_ytag,pfn_yrat,pfn_xtag,\
pfn_xrat,pfn_ecl))
extra_ppars['filename'] = pfn
ff = Plotting2.plotCols(x=xb and x[xk]+xb or x[xk],\
y=yb and y[yk]+yb or y[yk],\
yerr=yb and yerr[yk]+[None]*len(yb) or yerr[yk],\
xerr=xb and xerr[xk]+[None]*len(xb) or xerr[xk],\
**extra_ppars)
print ff
if n_data > 0:
print 'Stars plotted (in order of x):'
for xi,yi,(ec,isg,isgd,isgm,nsgd,nsgm) \
in zip(x[xk],y[yk],ecl_num):
if ec[0] == 'm': continue
ifin = np.isfinite(xi) * np.isfinite(yi)
isort = np.argsort(xi[ifin])
sgsort = array(sg)[isgd][ifin][isort]
k = ', '.join(['%s = %s%s%s'
%(keynames[con],\
makeints[con] and str(int(v)) or str(v),\
keyunits[con] and ' ' or '',\
keyunits[con])
for con,v in zip(extra_dpar,ec[1])])
print k, ': %s'%', '.join([s['STAR_NAME'] for s in sgsort])
return ff
def guessRatio(line1,line1_err,line2,line2_err,line1_log=0,line2_log=0,\
n_fit=10000,positive=0):
'''
Guess a ratio of given values with error bars a given number of times.
For both components of the ratio, values are drawn from a Gaussian
distribution around the given value, with the error as sigma.
Can be used for error analysis, or for estimating errors on ratios in case
the errors on the separate components are difficult to propagate properly.
The option to guess a value within error bars in log space is possible. The
resulting value is then converted back to linear space, after which the
ratio is taken.
    Negative values can occur, due to the Gaussian nature of the guesses. Keep
    this in mind when taking the log of output values. If you do not want
    negative values, this can be requested via the positive keyword.
    A guess of the ratio, and a standard deviation, can be calculated by taking
    the mean and std of the columns in the output array.
@param line1: Values of the first parameter on the y-axis
@type line1: array
@param line1_err: Uncertainties on line1, assuming they are in a normal
distribution (1-sigma)
@type line1_err: array
@param line2: Values of the second parameter on the y-axis. Can be an empty
array in case you want to fit a correlation between par and
line1 without any ratio involved. Pass an empty array if you
simply want to randomize a single array, instead of a ratio.
@type line2: array
@param line2_err: Uncertainties on line2, assuming they are in a normal
distribution (1-sigma)
@type line2_err: array
@keyword line1_log: If line 1 is in log scale. In the ratio, 10**line1 is
then taken.
(default: 0)
@type line1_log: bool
@keyword line2_log: if line 2 is in log scale. In the ratio, 10**line1 is
then taken.
(default: 0)
@type line2_log: bool
@keyword n_fit: The number of times the correlation is fitted.
(default: 10000)
@type n_fit: int
@keyword positive: In some cases, you may want to disallow negative values,
eg when you take the log of the results. This switch
allows you to exclude negative values from the output.
Use this with caution! In some case, this will severely
affect the Gaussian distribution.
(default: 0)
@type positive: bool
@return: The n_fit guesses of the requested ratio.
@rtype: array((n_fit,len(line1)))
'''
n_fit = int(n_fit)
line1_log, line2_log = bool(line1_log), bool(line2_log)
line1, line1_err = array(line1), array(line1_err)
line2, line2_err = array(line2), array(line2_err)
yarr = np.empty((n_fit,len(line1)))
    if positive:
        for n in range(n_fit):
            #-- Rejection sampling: redraw until all finite guesses are
            #   positive. Guesses drawn in log scale are always positive
            #   after conversion to linear scale, so they pass immediately.
            while True:
                guess1 = normal(line1, line1_err)
                if line1_log:
                    guess1 = 10**guess1
                    break
                elif False not in (guess1[np.isfinite(guess1)] > 0):
                    break
            while True:
                if line2.size != 0:
                    guess2 = normal(line2, line2_err)
                else:
                    #-- No second component: the 'ratio' is just line1.
                    guess2 = 1
                    break
                if line2_log:
                    guess2 = 10**guess2
                    break
                elif False not in (guess2[np.isfinite(guess2)] > 0):
                    break
            yarr[n] = guess1/guess2
else:
for n in range(n_fit):
guess1 = normal(line1, line1_err)
if line1_log:
guess1 = 10**guess1
if line2.size != 0:
guess2 = normal(line2, line2_err)
else:
guess2 = 1
if line2_log and line2.size != 0:
guess2 = 10**guess2
yarr[n] = guess1/guess2
return yarr
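#-- Illustrative sketch, not part of the original module: estimate the error
#   on a line ratio by Monte Carlo sampling with guessRatio. All numbers are
#   made up for demonstration.
def _exampleGuessRatio():
    line1 = array([1e-16, 5e-16])     # line strengths in W/m2
    line1_err = 0.10*line1            # absolute 1-sigma errors (10%)
    line2 = array([2e-16, 1e-16])
    line2_err = 0.05*line2
    ratios = guessRatio(line1, line1_err, line2, line2_err, n_fit=10000)
    #-- Mean and std per column give the ratio estimate and its uncertainty.
    print 'Ratios:', ratios.mean(axis=0)
    print 'Errors:', ratios.std(axis=0)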
def fitCorrPolyLog(par1,par1_err,par2,par2_err,line1,line1_err,line2,line2_err,\
par1_log=0,par2_log=0,line1_log=0,line2_log=0,n_fit=10000,\
poly_degree=1,show=0,fn_plt='',x_for_yratio=0,\
y_for_xratio=0):
'''
Fit a polynomial to a data set.
The data set can consist of straight up values or of ratios on both the x
and y axis (in log space).
Takes into account errors in both dimensions.
Can be used for e.g. error estimation on a correlation.
@param par1: Values of the first parameter on x-axis.
@type par1: array
@param par1_err: Uncertainties on par, assuming they are in a normal
distribution (1-sigma)
@type par1_err: array
@param par2: Values of the second parameter on the x-axis. Can be an empty
array in case you don't want a ratio on the x-axis.
@type par2: array
@param par2_err: Uncertainties on par2, assuming they are in a normal
distribution (1-sigma)
@type par2_err: array
@param line1: Values of the first parameter on the y-axis
@type line1: array
@param line1_err: Uncertainties on line1, assuming they are in a normal
distribution (1-sigma)
@type line1_err: array
@param line2: Values of the second parameter on the y-axis. Can be an empty
array in case you don't want a ratio on the x-axis.
@type line2: array
@param line2_err: Uncertainties on line2, assuming they are in a normal
distribution (1-sigma)
@type line2_err: array
@keyword par1_log: If par is in log scale. If not, the log will be taken of
par, since this method fits log log correlations.
(default: 0)
@type par1_log: bool
@keyword par2_log: If par2 is in log scale. If not, the log will be taken
of par2, since this method fits log log correlations.
(default: 0)
@type par2_log: bool
@keyword line1_log: If line 1 is in log scale. In the ratio, 10**line1 is
then taken.
(default: 0)
@type line1_log: bool
@keyword line2_log: if line 2 is in log scale. In the ratio, 10**line1 is
then taken.
(default: 0)
@type line2_log: bool
@keyword n_fit: The number of times the correlation is fitted.
(default: 10000)
@type n_fit: int
@keyword poly_degree: The degree of the polynomial that is fitted.
(default: 1)
@type poly_degree: int
@keyword show: Show a plot with the results. If cfg is given, the plot is
adapted, including the filename.
(default: 0)
@type show: bool
@keyword fn_plt: The filename of the plot, in case show is True, and a
saved plot is requested.
(default: '')
@type fn_plt: str
@keyword x_for_yratio: Use the par grid as the second component in the y
ratio. This can be useful for instance if the ratio
has Mdot as numerator, while Mdot is also on the x
axis. In this case, you want to use the same random
value for the same point on both x and y.
(default: 0)
@type x_for_yratio: bool
@keyword y_for_xratio: Use the line1 grid as the second component in the x
ratio. This can be useful for instance if the ratio
has Mdot as numerator, while Mdot is also on the y
axis. In this case, you want to use the same random
value for the same point on both x and y.
(default: 0)
@type y_for_xratio: bool
@return: The fit results are returned for all n_fit fitted functions. The
parameters are the output of np.polyfit and the amount depends on
the polynomial degree.
@rtype: array
'''
poly_degree = int(poly_degree)
fitcoef = np.empty((n_fit, poly_degree+1))
if y_for_xratio:
xarr = guessRatio(par1,par1_err,[],[],line1_log=par1_log,n_fit=n_fit,\
positive=1)
y1 = guessRatio(line1,line1_err,[],[],line1_log=line1_log,n_fit=n_fit,\
positive=1)
else:
xarr = guessRatio(par1,par1_err,par2,par2_err,line1_log=par1_log,\
line2_log=par2_log,n_fit=n_fit,positive=1)
if x_for_yratio:
yarr = guessRatio(line1,line1_err,[],[],line1_log,\
n_fit=n_fit,positive=1)
x1 = guessRatio(par1,par1_err,[],[],line1_log=par1_log,\
n_fit=n_fit,positive=1)
else:
yarr = guessRatio(line1,line1_err,line2,line2_err,line1_log,line2_log,\
n_fit=n_fit,positive=1)
for n,x,y in zip(range(n_fit),xarr,yarr):
#-- Set up the dataset of x and y values.
# The x-values are drawn using gaussian distributed par values.
# The y-values are drawn using gaussian distributed line1/line2 ratio
# values.
# For both x and y, checks are done for negative values, since the
# log10 is taken of both of them.
xl = np.log10(x)
yl = np.log10(y)
if x_for_yratio:
yl = yl - np.log10(x1[n])
if y_for_xratio:
xl = xl - np.log10(y1[n])
fitcoef[n] = np.polyfit(xl, yl, poly_degree)
if show and poly_degree == 1:
#-- Plot a bunch of stuff
plt.figure(1)
plt.clf()
#-- Create a scatter plot of all fitted polynomials.
plt.subplot(221)
x1 = par1
if par2.size != 0:
x2 = par2
else:
x2 = 1
if par1_log:
x1 = 10**x1
if par2_log and par2.size != 0:
x2 = 10**x2
x = np.log10(x1/x2)
y1 = line1
if line2.size != 0:
y2 = line2
else:
y2 = 1
if line1_log:
y1 = 10**y1
if line2_log and line2.size != 0:
y2 = 10**y2
y = np.log10(y1/y2)
plt.scatter(x, y, color='blue', marker='o')
x_grid = np.linspace(1.05*x.min(),0.95*x.max(),100)
for n in range(0, fitcoef.shape[0], 2):
y_grid = fitcoef[n,1] + fitcoef[n,0] * x_grid
plt.plot(x_grid, y_grid, color="red", alpha = 0.006)
plt.xlabel("log(X)")
plt.ylabel("log(Y)")
plt.subplot(222)
plt.hexbin(fitcoef[:,0], fitcoef[:,1], bins=40)
plt.xlabel("Slope")
plt.ylabel("Intercept")
plt.subplot(223)
plt.hist(fitcoef[:,0], bins=40)
plt.xlabel("Slope")
plt.ylabel("N")
plt.subplot(224)
plt.hist(fitcoef[:,1], bins=40)
plt.xlabel("Intercept")
plt.ylabel("N")
if not fn_plt:
plt.show()
else:
plt.savefig(fn_plt)
return fitcoef
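#-- Illustrative sketch, not part of the original module: recover a known
#   log-log relation log10(y) = 2*log10(x) + 1 from synthetic data with 10%
#   errors, using fitCorrPolyLog with no ratios on either axis.
def _exampleFitCorrPolyLog():
    x = np.linspace(1., 10., 20)
    y = 10.*x**2
    coef = fitCorrPolyLog(par1=x, par1_err=0.1*x, par2=array([]),
                          par2_err=array([]), line1=y, line1_err=0.1*y,
                          line2=array([]), line2_err=array([]), n_fit=1000)
    #-- The slope should scatter around 2 and the intercept around 1.
    print 'Slope: %.2f +/- %.2f' % (coef[:, 0].mean(), coef[:, 0].std())
    print 'Intercept: %.2f +/- %.2f' % (coef[:, 1].mean(), coef[:, 1].std())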
def selectDataSG(sg,par,epar,par_co=None,edist=[]):
'''
Tool for selecting data from a star_grid. Mainly used by corrStarGrid() to
define input arrays for the correlation study.
@param sg: The grid of Star() objects.
@type sg: list[Star()]
    @param par: The requested parameter. If a string, a Star() keyword is used
                and if MDOT_GAS then the error bars are set as log10(3)/3. If
                an integer, it is the index of a Transition() in the Star(),
                and the line strength of that line, and the error on it, are
                taken.
@type par: str/int
@param epar: The 1-sigma error bar of the parameter. Only relevant if par
is a string Star() keyword that is not MDOT_GAS.
@type epar: array
@keyword par_co: Define cutoff values here. Always given as an array of
size 2. If a lower and/or upper boundary is not needed,
it is set as None. In case of MDOT_GAS==xpar, these values
are converted to log scale. Can be set as None if no
cutoff is needed. Only relevant if par is a string.
(default: None)
@type par_co: array
@keyword edist: Give the relative error of the distance here. Used to
estimate an uncertainty on the rescaled line strengths
according to distance (down to 100 pc). Not relevant when
par is a string. An empty array implies no scaling.
(default: [])
@type edist: array
@return: The values for the requested parameter, as well as the
uncertainty, the cutoff values and the log request, ie all
keywords needed for the fitCorrPolyLog method.
@rtype: (arr,arr,arr,bool)
'''
edist = array(edist)
if isinstance(par,str):
vals = array([s[par] for s in sg])
if not par_co is None:
if par_co[0] is None:
par_co[0] = min(vals)
if par_co[1] is None:
par_co[1] = max(vals)
            par_co = array(par_co,dtype=float)
if par == 'MDOT_GAS':
evals = np.ones(len(sg))*np.log10(3.)/3.
vals = np.log10(vals)
if not par_co is None: par_co = np.log10(par_co)
vals_log = 1
else:
evals = epar
vals_log = 0
else:
sample = sg[0]['GAS_LINES'][par]
trans = Transition.getTransFromStarGrid(sg,sample,'sample')
vals,evals = Transition.getLineStrengths(trans,mode='dint')
#-- For now blends are ignored! Negative values cannot be included for
# the trend analysis and have to be handled more properly at a later
# stage if blends are somehow to be incorporated.
vals = abs(vals)
vals_log = 0
if edist.size:
dists = array([s['DISTANCE'] for s in sg])
vals = vals*dists**2/100.**2
evals = vals*np.sqrt(4*edist**2+evals**2)
else:
evals = vals*evals
#-- Set a default for par_co since a cutoff is never needed for LS.
finvals = vals[np.isfinite(vals)]
par_co = array([min(finvals),max(finvals)])
return (vals,evals,par_co,vals_log)
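#-- For instance (hypothetical values), selecting log(MDOT_GAS) with its
#   default log10(3)/3 error bars and a lower cutoff of 1e-6 Msun/yr:
#
#       vals, evals, par_co, vals_log = selectDataSG(sg, 'MDOT_GAS', [],
#                                                    par_co=[1e-6, None])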
def corrSG(sg,xpar,ypar,expar=[],eypar=[],xratio=None,yratio=None,\
eyratio=[],exratio=[],edist=[],xpar_co=(None,None),\
ypar_co=(None,None),**kwargs):
'''
A method focused on finding correlations between parameters and/or data
of multiple Star() objects.
@param sg: The grid of Star() objects.
@type sg: list[Star()]
@param xpar: The parameter on x-axis. If a string, a Star() keyword is used
and if MDOT_GAS then the error bars are set as log10(3)/3. If
                 an integer, it is the index of a Transition() in the Star(),
                 and the line strength of that line, and the error on it, are
                 taken.
If the same as yratio, the same guesses are used for both.
@type xpar: str/int
@param ypar: The parameter on y-axis. If a string, a Star() keyword is used
and if MDOT_GAS then the error bars are set as log10(3)/3. If
                 an integer, it is the index of a Transition() in the Star(),
                 and the line strength of that line, and the error on it, are
                 taken.
If the same as xratio, the same guesses are used for both.
@type ypar: int/str
@keyword expar: The 1-sigma error bar of the parameter on the xaxis. Only
relevant if xpar is a string Star() keyword that is not
MDOT_GAS.
(default: [])
@type expar: array
@keyword eypar: The 1-sigma error bar of the parameter on the yaxis. Only
relevant if ypar is a string Star() keyword that is not
MDOT_GAS.
(default: [])
@type eypar: array
@keyword xratio: If a ratio on the x-axis is requested, this is the second
component. Input syntax same as xpar.
(default: None)
@type xratio: int/str
@keyword exratio: The relative error bars on the xratio parameter are given
here. Only relevant if xratio is a string Star() keyword
that is not MDOT_GAS.
(default: [])
@type exratio: array
@keyword yratio: If a ratio on the y-axis is requested, this is the second
component. Input syntax same as ypar.
(default: None)
@type yratio: int/str
@keyword eyratio: The relative error bars on the yratio parameter are given
here. Only relevant if yratio is a string Star() keyword
that is not MDOT_GAS.
(default: [])
@type eyratio: array
@keyword edist: Give the relative error of the distance here. Used to
estimate an uncertainty on the rescaled line strengths
according to distance (down to 100 pc). Not relevant when a
line ratio is requested.
(default: [])
@type edist: array
@keyword xpar_co: Define cutoff values here. Always given as an array of
size 2. If a lower and/or upper boundary is not needed,
                      it is set as None. In case of MDOT_GAS==xpar, these values
are converted to log scale.
(default: array(None,None))
@type xpar_co: array
@keyword ypar_co: Define cutoff values here. Always given as an array of
size 2. If a lower and/or upper boundary is not needed,
                      it is set as None. In case of MDOT_GAS==ypar, these values
are converted to log scale.
(default: array(None,None))
@type ypar_co: array
@keyword kwargs: Extra keywords relevant for fitLinearCorr or
fitCorrPolyLog
@type kwargs: dict
    @return: The resulting fit parameters are returned. Not yet implemented
             (NYI) if poly_degree != 1.
@rtype: dict()
'''
    #-- Get the line strengths of the correlated transitions; a scaling with
    #   distance is not required in every case. The absolute uncertainty is
    #   set here as well.
expar, eypar, edist = array(expar), array(eypar), array(edist)
exratio, eyratio = array(exratio), array(eyratio)
xpar_co, ypar_co = array(xpar_co), array(ypar_co)
ep = dict()
#-- The x-axis parameter is set here.
if xratio is None or isinstance(xratio,str):
xv, exv, xpar_co, ep['par1_log'] = selectDataSG(sg,xpar,expar,xpar_co,\
edist)
else:
xv, exv, xpar_co, ep['par1_log'] = selectDataSG(sg,xpar,expar,xpar_co)
#-- The x-ratio is set here.
if xratio is None or xratio == ypar:
        if xratio is not None: ep['y_for_xratio'] = 1
xrat, exrat, ep['par2_log'] = array([]),array([]),0
else:
xrat, exrat, dummy, ep['par2_log'] = selectDataSG(sg,xratio,exratio)
#-- The y-axis parameter is set here
if yratio is None or isinstance(yratio,str):
yv, eyv, ypar_co, ep['line1_log'] = selectDataSG(sg,ypar,eypar,\
ypar_co,edist)
else:
yv, eyv, ypar_co, ep['line1_log'] = selectDataSG(sg,ypar,eypar,ypar_co)
#-- The y-ratio is set here.
if yratio is None or yratio == xpar:
        if yratio is not None: ep['x_for_yratio'] = 1
yrat, eyrat, ep['line2_log'] = array([]),array([]),0
else:
yrat, eyrat, dummy, ep['line2_log'] = selectDataSG(sg,yratio,eyratio)
#-- Sort the grids according to xpar.
isort = np.argsort(xv)
xv, exv = xv[isort], exv[isort]
yv, eyv = yv[isort], eyv[isort]
if xrat.size:
xrat, exrat = xrat[isort], exrat[isort]
if yrat.size:
yrat, eyrat = yrat[isort], eyrat[isort]
#-- Make sure there are no NaNs in the grids.
if xrat.size:
xselfinite = np.isfinite(xv/xrat)
else:
xselfinite = np.isfinite(xv)
if yrat.size:
yselfinite = np.isfinite(yv/yrat)
else:
yselfinite = np.isfinite(yv)
selfinite = yselfinite * xselfinite
if xrat.size:
xrat, exrat = xrat[selfinite], exrat[selfinite]
if yrat.size:
yrat, eyrat = yrat[selfinite], eyrat[selfinite]
xv, exv = xv[selfinite], exv[selfinite]
yv, eyv = yv[selfinite], eyv[selfinite]
#-- Select subset based on cutoff vals for x and/or y (1st component only).
xbools = (xv>=xpar_co[0]) * (xv<=xpar_co[1])
ybools = (yv>=ypar_co[0]) * (yv<=ypar_co[1])
bools = xbools*ybools
xv, exv = xv[bools], exv[bools]
yv, eyv = yv[bools], eyv[bools]
if xrat.size:
xrat, exrat = xrat[bools], exrat[bools]
if yrat.size:
yrat, eyrat = yrat[bools], eyrat[bools]
kwargs.update(ep)
allcoef = fitCorrPolyLog(par1=xv,par1_err=exv,par2=xrat,par2_err=exrat,\
line1=yv,line1_err=eyv,line2=yrat,\
line2_err=eyrat,**kwargs)
results = dict()
if kwargs.get('poly_degree',1) == 1:
results['n_points'] = len(xv)
results['slope'] = allcoef[:,0].mean()
results['eslope'] = allcoef[:,0].std()
results['intercept'] = allcoef[:,1].mean()
results['eintercept'] = allcoef[:,1].std()
for x, x_err, name in [(results['slope'],results['eslope'],"Slope"),\
(results['intercept'],results['eintercept'],\
"Intercept")]:
print("{0} = {1} +/- {2}".format(name, x, x_err))
corrcoef = np.corrcoef(allcoef[:,0], allcoef[:,1])
results['corrcoef'] = corrcoef[1,0]
results['covariance'] = results['corrcoef']*results['eslope']\
*results['eintercept']
results['results'] = allcoef
print("The correlation coefficient between slope & intercept is {0}."\
.format(results['corrcoef']))
print("This leads to a covariance of {0} for slope & intercept."\
.format(results['covariance']))
print("Finally, {0} data points were available to produce this fit."\
.format(results['n_points']))
else:
results['poly_degree'] = kwargs.get('poly_degree')
results['results'] = allcoef
return results
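#-- Example (hypothetical sketch, not part of the original module): correlate
#   the gas mass-loss rate with the strength of one line across a star grid.
#   Assumes `star_grid` is a list of Star() objects with 'MDOT_GAS' and
#   'DISTANCE' set, that index 0 selects a valid Transition() in each Star(),
#   and that the 10% relative distance errors are purely illustrative.
#
# import numpy as np
# edist = np.ones(len(star_grid))*0.1
# fit = corrSG(star_grid,xpar='MDOT_GAS',ypar=0,edist=edist)
# print(fit['slope'],fit['eslope'],fit['n_points'])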
|
MarieVdS/ComboCode
|
cc/statistics/TrendAnalysis.py
|
Python
|
gpl-3.0
| 79,115
|
[
"Gaussian"
] |
f1a56cd55c2d63a72972ee4048a8e7b056948a0c8dc1b1a39768a0e532ae0106
|
"""
Viewing Stanford 3D Scanning Repository dragon model
"""
# Copyright (c) 2014-2015, Enthought, Inc.
# Standard library imports
import os
from os.path import join
# Enthought library imports
from mayavi import mlab
### Download the dragon data, if not already on disk ############################
if not os.path.exists('dragon.tar.gz'):
# Download the data
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
print("Downloading dragon model, Please Wait (11MB)")
opener = urlopen(
'http://graphics.stanford.edu/pub/3Dscanrep/dragon/dragon_recon.tar.gz')
open('dragon.tar.gz', 'wb').write(opener.read())
# Extract the data
import tarfile
dragon_tar_file = tarfile.open('dragon.tar.gz')
try:
    os.mkdir('dragon_data')
except OSError:
    # The directory may already exist.
    pass
dragon_tar_file.extractall('dragon_data')
dragon_tar_file.close()
# Path to the dragon ply file
dragon_ply_file = join('dragon_data', 'dragon_recon', 'dragon_vrip.ply')
# Render the dragon ply file
mlab.pipeline.surface(mlab.pipeline.open(dragon_ply_file))
mlab.show()
import shutil
shutil.rmtree('dragon_data')
|
dmsurti/mayavi
|
examples/mayavi/mlab/dragon.py
|
Python
|
bsd-3-clause
| 1,143
|
[
"Mayavi"
] |
e8101ecbb41b2e43cb8d251dd4c4f6171408fc0e1ef22cae69183d53a1c983d3
|
import os
import warnings
# cmr calls all available methods in ase.atoms, as detected via the inspect
# module. As a result, deprecated methods are also called, and we choose to
# silence those warnings.
warnings.filterwarnings('ignore', 'ase.atoms.*deprecated',)
import numpy as np
def array_almost_equal(a1, a2, tol=np.finfo(type(1.0)).eps):
"""Replacement for old numpy.testing.utils.array_almost_equal."""
return (np.abs(a1 - a2) < tol).all()
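# For example (illustrative): with the default eps tolerance,
# array_almost_equal(np.array([1., 2.]), np.array([1., 2.])) returns True,
# while array_almost_equal(np.array([1., 2.]), np.array([1., 2.1])) is False.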
from ase.test import NotAvailable
# this test should be run with cmr!
try:
import cmr
except ImportError:
raise NotAvailable('CMR is required')
from ase.calculators.emt import EMT
from ase.io import read, write
from ase.structure import molecule
m1 = molecule('O2')
m1.center(2.0)
write("O2.db", images=m1)
m1.set_calculator(EMT())
e1 = m1.get_potential_energy()
f1 = m1.get_forces()
m2 = read("O2.db")
m2.set_calculator(EMT())
e2 = m2.get_potential_energy()
f2 = m2.get_forces()
# assume atoms definitions are the same if energy/forces are the same: can we do better?
assert abs(e1-e2) < 1.e-6, str(e1) + ' ' + str(e2)
assert array_almost_equal(f1, f2, tol=1.e-6)
# clean
filename = "O2.db"
if os.path.exists(filename): os.unlink(filename)
|
JConwayAWT/PGSS14CC
|
lib/python/multimetallics/ase/test/cmr/rw.py
|
Python
|
gpl-2.0
| 1,209
|
[
"ASE"
] |
091cecb6d31927ccbbff9bf184918b4bf1193a87728d8f9712d7e42a43a4327f
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""A writer for chemical markup language (CML) files."""
try:
import openbabel as ob
has_openbabel = True
except ImportError:
has_openbabel = False
import xml.etree.ElementTree as ET
from . import filewriter
class CML(filewriter.Writer):
"""A writer for chemical markup language (CML) files."""
def __init__(self, ccdata, *args, **kwargs):
"""Initialize the CML writer object.
Inputs:
ccdata - An instance of ccData, parsed from a logfile.
"""
# Call the __init__ method of the superclass
super(CML, self).__init__(ccdata, *args, **kwargs)
def generate_repr(self):
"""Generate the CML representation of the logfile data."""
# Create the base molecule.
molecule = ET.Element('molecule')
d = {
# Write the namespace directly.
'xmlns': 'http://www.xml-cml.org/schema',
}
if self.jobfilename is not None:
d['id'] = self.jobfilename
_set_attrs(molecule, d)
# Form the listing of all the atoms present.
atomArray = ET.SubElement(molecule, 'atomArray')
if hasattr(self.ccdata, 'atomcoords') and hasattr(self.ccdata, 'atomnos'):
elements = [self.pt.element[Z] for Z in self.ccdata.atomnos]
for atomid in range(self.ccdata.natom):
atom = ET.SubElement(atomArray, 'atom')
x, y, z = self.ccdata.atomcoords[-1][atomid].tolist()
d = {
'id': 'a{}'.format(atomid + 1),
'elementType': elements[atomid],
'x3': '{:.10f}'.format(x),
'y3': '{:.10f}'.format(y),
'z3': '{:.10f}'.format(z),
}
_set_attrs(atom, d)
# Form the listing of all the bonds present.
bondArray = ET.SubElement(molecule, 'bondArray')
if has_openbabel:
for bc in self.bond_connectivities:
bond = ET.SubElement(bondArray, 'bond')
d = {
'atomRefs2': 'a{} a{}'.format(bc[0] + 1, bc[1] + 1),
'order': str(bc[2]),
}
_set_attrs(bond, d)
_indent(molecule)
return _tostring(molecule)
def _set_attrs(element, d):
"""Set all the key-value pairs from a dictionary as element
attributes.
"""
for (k, v) in d.items():
element.set(k, v)
def _indent(elem, level=0):
"""An in-place pretty-print indenter for XML."""
i = "\n" + (level * " ")
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def _tostring(element, xml_declaration=True, encoding='utf-8', method='xml'):
"""A reimplementation of tostring() found in ElementTree."""
class dummy:
pass
data = []
file = dummy()
file.write = data.append
ET.ElementTree(element).write(file,
xml_declaration=xml_declaration,
encoding=encoding,
method=method)
return b''.join(data).decode(encoding)
if __name__ == "__main__":
pass
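# Example (hypothetical sketch): write a parsed logfile as CML. Assumes that
# cclib's ccopen() helper is available and that "water.log" is a real output
# file; both names are illustrative.
#
# from cclib.io import ccopen
# data = ccopen("water.log").parse()
# print(CML(data).generate_repr())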
|
gaursagar/cclib
|
src/cclib/io/cmlwriter.py
|
Python
|
bsd-3-clause
| 3,725
|
[
"cclib"
] |
fd40807fda301787ba5ed7d5bd2c3a90cc4b58fd0e22975048b54bfc88a6107f
|
# Copyright (C) 2013 The ESPResSo project
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script appends the sample list of features to the file
# myconfig-sample.h.
#
from __future__ import print_function
import sys, featuredefs, fileinput
if len(sys.argv) != 2:
print("Usage: %s DEFFILE" % sys.argv[0], file=sys.stderr)
exit(2)
deffilename = sys.argv[1]
#print "Reading definitions from " + deffilename + "..."
defs = featuredefs.defs(deffilename)
#print "Done."
#print "Writing " + hfilename + "..."
featuresdone = set()
for line in fileinput.input(deffilename):
line = line.strip()
# Handle empty and comment lines
if len(line) == 0:
print()
continue
elif line.startswith('#'):
continue
elif line.startswith('//') or line.startswith('/*'):
print(line)
continue
# Tokenify line
feature = line.split(None, 1)[0]
if feature in defs.features and feature not in featuresdone:
print('//#define %s' % feature)
featuresdone.add(feature)
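# Example (illustrative; feature names are hypothetical): for a DEFFILE with
# the lines
#
# /* general features */
# PARTIAL_PERIODIC
# ELECTROSTATICS
#
# this script prints
#
# /* general features */
# //#define PARTIAL_PERIODIC
# //#define ELECTROSTATICS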
|
gizeminci/espresso
|
config/gen_sampleconfig.py
|
Python
|
gpl-3.0
| 1,710
|
[
"ESPResSo"
] |
c8f54330aa2d2538acf779ac81352949b9ae1aa427bc8ab88beb96638ad92d6b
|
import csv
import random
from time import time
from src.genetic_algorithm import GA
from src.hw1.NeuralNetwork import NeuralNetwork
from src.hw1.Utils.utilities import get_seeds, generate_network
seed_train_set, seed_test_set, seed_train_expected, seed_test_expected = get_seeds("formatted_seeds.txt", 35)
test_data = seed_train_set + seed_test_set
test_expected = seed_train_expected + seed_test_expected
xor_data = [[0, 0], [1, 0], [0, 1], [1, 1]]
xor_exp = [[1, 0], [0, 1], [0, 1], [1, 0]]
layers = 2
neurons = 5
topology = (len(seed_train_set[0]), len(seed_train_expected[0])) # in | out
# topology = (2, 2)
def network_generator():
return generate_network(topology[0], topology[1], neurons, layers)
def get_rand():
return random.uniform(-5, 5)
def breed(p1, p2):
layer = random.randrange(len(p1))
neuron = random.randrange(len(p1[layer]))
child1 = p1[:layer] + p2[layer:]
child2 = p2[:layer] + p1[layer:]
for n in range(neuron):
child1[layer][n], child2[layer][n] = child2[layer][n], child1[layer][n]
return child1, child2
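# Illustrative trace of breed() (toy weights, not a real network): with
# p1 = [[1, 1], [2, 2]], p2 = [[9, 9], [8, 8]], layer = 1 and neuron = 1,
# the slicing step gives child1 = [[1, 1], [8, 8]], child2 = [[9, 9], [2, 2]];
# the swap loop then exchanges entry 0 of layer 1, so child1 ends up as
# [[1, 1], [2, 8]] and child2 as [[9, 9], [8, 2]]. Note that slicing copies
# only the outer lists: the layer lists are still shared with the parents,
# so the swap loop mutates p1 and p2 in place as well.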
def fitness_function_weights(network):
the_network = NeuralNetwork(manual=True)
the_network.initialize(network=network)
correctness = 0.
for t, ex in zip(test_data, test_expected):
ideal_output = ex
actual = the_network.feed(t)
normalized_output = [round(e, 0) for e in actual]
if ideal_output == normalized_output:
correctness += 1.
return correctness / len(test_data)
if __name__ == '__main__':
ga = GA(pop_size=50, mutation_rate=0.3, fitness=fitness_function_weights,
net_generator=network_generator, single_gen=get_rand, breed_function=breed,
min_fitness=1.1, max_iter=100)
start = time()
fitness, avg, best = ga.run()
print("best is: {}\nwith {} acc\ntook {} generations".format(best, fitness[-1], len(fitness)))
print("took {} seconds".format(time() - start))
with open("network_out_test.csv", 'w', newline="\n") as o:
out = csv.writer(o)
out.writerow(['generation', 'best_fitness', 'avg_value'])
for i, fit in enumerate(fitness):
out.writerow([i, fit, avg[i]])
|
juanpablos/CC5114-Projects
|
Tarea 3/Main.py
|
Python
|
mit
| 2,203
|
[
"NEURON"
] |
3d670419f8e7859a68bbde7c639e18c647e130f4bed79b24377fa934d5a1df71
|
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides the capability to load netCDF files and interpret them
according to the 'NetCDF Climate and Forecast (CF) Metadata Conventions'.
References:
[CF] NetCDF Climate and Forecast (CF) Metadata conventions, Version 1.5, October, 2010.
[NUG] NetCDF User's Guide, http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
from abc import ABCMeta, abstractmethod
from collections import Iterable, MutableMapping
import os
import re
import warnings
import netCDF4
import numpy as np
import numpy.ma as ma
from iris._deprecation import warn_deprecated
import iris.util
#
# CF parse pattern common to both formula terms and measure CF variables.
#
_CF_PARSE = re.compile(r'''
\s*
(?P<lhs>[\w_]+)
\s*:\s*
(?P<rhs>[\w_]+)
\s*
''', re.VERBOSE)
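# For example, _CF_PARSE.finditer('a: var1 b: var2') yields the (lhs, rhs)
# pairs ('a', 'var1') and ('b', 'var2').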
# NetCDF variable attributes handled by the netCDF4 module and
# therefore automatically classed as "used" attributes.
_CF_ATTRS_IGNORE = set(['_FillValue', 'add_offset', 'missing_value', 'scale_factor', ])
#: Supported dimensionless vertical coordinate reference surface/phenomenon
#: formula terms. Ref: [CF] Appendix D.
reference_terms = dict(atmosphere_sigma_coordinate=['ps'],
atmosphere_hybrid_sigma_pressure_coordinate=['ps'],
atmosphere_hybrid_height_coordinate=['orog'],
atmosphere_sleve_coordinate=['zsurf1', 'zsurf2'],
ocean_sigma_coordinate=['eta', 'depth'],
ocean_s_coordinate=['eta', 'depth'],
ocean_sigma_z_coordinate=['eta', 'depth'],
ocean_s_coordinate_g1=['eta', 'depth'],
ocean_s_coordinate_g2=['eta', 'depth'])
# NetCDF returns a different type for strings depending on Python version.
def _is_str_dtype(var):
return ((six.PY2 and np.issubdtype(var.dtype, np.str)) or
(six.PY3 and np.issubdtype(var.dtype, np.bytes_)))
################################################################################
class CFVariable(six.with_metaclass(ABCMeta, object)):
"""Abstract base class wrapper for a CF-netCDF variable."""
#: Name of the netCDF variable attribute that identifies this
#: CF-netCDF variable.
cf_identity = None
def __init__(self, name, data):
# Accessing the list of netCDF attributes is surprisingly slow.
# Since it's used repeatedly, caching the list makes things
# quite a bit faster.
self._nc_attrs = data.ncattrs()
#: NetCDF variable name.
self.cf_name = name
#: NetCDF4 Variable data instance.
self.cf_data = data
#: Collection of CF-netCDF variables associated with this variable.
self.cf_group = None
        #: CF-netCDF formula terms that this variable participates in.
self.cf_terms_by_root = {}
self.cf_attrs_reset()
@staticmethod
def _identify_common(variables, ignore, target):
if ignore is None:
ignore = []
if target is None:
target = variables
elif isinstance(target, six.string_types):
if target not in variables:
raise ValueError('Cannot identify unknown target CF-netCDF variable %r' % target)
target = {target: variables[target]}
else:
            raise TypeError('Expected a single target CF-netCDF variable name')
return (ignore, target)
@abstractmethod
def identify(self, variables, ignore=None, target=None, warn=True):
"""
Identify all variables that match the criterion for this CF-netCDF variable class.
Args:
* variables:
Dictionary of netCDF4.Variable instance by variable name.
Kwargs:
* ignore:
List of variable names to ignore.
* target:
Name of a single variable to check.
* warn:
Issue a warning if a missing variable is referenced.
Returns:
Dictionary of CFVariable instance by variable name.
"""
pass
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
result = set(self.dimensions).issubset(cf_variable.dimensions)
return result
def __eq__(self, other):
# CF variable names are unique.
return self.cf_name == other.cf_name
def __ne__(self, other):
# CF variable names are unique.
return self.cf_name != other.cf_name
def __hash__(self):
# CF variable names are unique.
return hash(self.cf_name)
def __getattr__(self, name):
# Accessing netCDF attributes is surprisingly slow. Since
# they're often read repeatedly, caching the values makes things
# quite a bit faster.
if name in self._nc_attrs:
self._cf_attrs.add(name)
value = getattr(self.cf_data, name)
setattr(self, name, value)
return value
def __getitem__(self, key):
return self.cf_data.__getitem__(key)
def __len__(self):
return self.cf_data.__len__()
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.cf_name, self.cf_data)
def cf_attrs(self):
"""Return a list of all attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr))
for attr in sorted(self._nc_attrs))
def cf_attrs_ignored(self):
"""Return a list of all ignored attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr)) for attr in
sorted(set(self._nc_attrs) & _CF_ATTRS_IGNORE))
def cf_attrs_used(self):
"""Return a list of all accessed attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr)) for attr in
sorted(self._cf_attrs))
def cf_attrs_unused(self):
"""Return a list of all non-accessed attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr)) for attr in
sorted(set(self._nc_attrs) - self._cf_attrs))
def cf_attrs_reset(self):
"""Reset the history of accessed attribute names of the CF-netCDF variable."""
self._cf_attrs = set([item[0] for item in self.cf_attrs_ignored()])
def add_formula_term(self, root, term):
"""
Register the participation of this CF-netCDF variable in a CF-netCDF formula term.
Args:
* root (string):
The name of CF-netCDF variable that defines the CF-netCDF formula_terms attribute.
* term (string):
The associated term name of this variable in the formula_terms definition.
Returns:
None.
"""
self.cf_terms_by_root[root] = term
def has_formula_terms(self):
"""
        Determine whether this CF-netCDF variable participates in a CF-netCDF formula term.
Returns:
Boolean.
"""
return bool(self.cf_terms_by_root)
class CFAncillaryDataVariable(CFVariable):
"""
A CF-netCDF ancillary data variable is a variable that provides metadata
about the individual values of another data variable.
Identified by the CF-netCDF variable attribute 'ancillary_variables'.
Ref: [CF] Section 3.4. Ancillary Data.
"""
cf_identity = 'ancillary_variables'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF ancillary data variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for ancillary data variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF ancillary data variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFAncillaryDataVariable(name, variables[name])
return result
class CFAuxiliaryCoordinateVariable(CFVariable):
"""
A CF-netCDF auxiliary coordinate variable is any netCDF variable that contains
coordinate data, but is not a CF-netCDF coordinate variable by definition.
There is no relationship between the name of a CF-netCDF auxiliary coordinate
variable and the name(s) of its dimension(s).
Identified by the CF-netCDF variable attribute 'coordinates'.
Also see :class:`iris.fileformats.cf.CFLabelVariable`.
Ref: [CF] Chapter 5. Coordinate Systems.
[CF] Section 6.2. Alternative Coordinates.
"""
cf_identity = 'coordinates'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF auxiliary coordinate variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for auxiliary coordinate variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF auxiliary coordinate variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
# Restrict to non-string type i.e. not a CFLabelVariable.
if not _is_str_dtype(variables[name]):
result[name] = CFAuxiliaryCoordinateVariable(name, variables[name])
return result
class CFBoundaryVariable(CFVariable):
"""
A CF-netCDF boundary variable is associated with a CF-netCDF variable that contains
coordinate data. When a data value provides information about conditions in a cell
occupying a region of space/time or some other dimension, the boundary variable
provides a description of cell extent.
A CF-netCDF boundary variable will have one more dimension than its associated
CF-netCDF coordinate variable or CF-netCDF auxiliary coordinate variable.
Identified by the CF-netCDF variable attribute 'bounds'.
Ref: [CF] Section 7.1. Cell Boundaries.
"""
cf_identity = 'bounds'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF boundary variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for a boundary variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF boundary variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFBoundaryVariable(name, variables[name])
return result
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore the bounds extent dimension.
result = set(source[:-1]).issubset(target) or \
set(source[1:]).issubset(target)
return result
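# Illustrative note: a boundary variable with dimensions ('time', 'nv') spans
# a coordinate variable with dimensions ('time',), since dropping the extent
# dimension from either end of the source leaves a subset of the target.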
class CFClimatologyVariable(CFVariable):
"""
A CF-netCDF climatology variable is associated with a CF-netCDF variable that contains
coordinate data. When a data value provides information about conditions in a cell
occupying a region of space/time or some other dimension, the climatology variable
provides a climatological description of cell extent.
A CF-netCDF climatology variable will have one more dimension than its associated
CF-netCDF coordinate variable.
Identified by the CF-netCDF variable attribute 'climatology'.
Ref: [CF] Section 7.4. Climatological Statistics
"""
cf_identity = 'climatology'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF climatology variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for a climatology variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF climatology variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFClimatologyVariable(name, variables[name])
return result
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore the climatology extent dimension.
result = set(source[:-1]).issubset(target) or \
set(source[1:]).issubset(target)
return result
class CFCoordinateVariable(CFVariable):
"""
A CF-netCDF coordinate variable is a one-dimensional variable with the same name
as its dimension, and it is defined as a numeric data type with values that are
ordered monotonically. Missing values are not allowed in CF-netCDF coordinate
variables. Also see [NUG] Section 2.3.1.
Identified by the above criterion, there is no associated CF-netCDF variable
attribute.
Ref: [CF] 1.2. Terminology.
"""
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True, monotonic=False):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF coordinate variables.
for nc_var_name, nc_var in six.iteritems(target):
if nc_var_name in ignore:
continue
# String variables can't be coordinates
if _is_str_dtype(nc_var):
continue
# Restrict to one-dimensional with name as dimension OR zero-dimensional scalar
if not ((nc_var.ndim == 1 and nc_var_name in nc_var.dimensions) or (nc_var.ndim == 0)):
continue
# Restrict to monotonic?
if monotonic:
data = nc_var[:]
# Gracefully fill a masked coordinate.
if ma.isMaskedArray(data):
data = ma.filled(data)
if nc_var.shape == () or nc_var.shape == (1,) or iris.util.monotonic(data):
result[nc_var_name] = CFCoordinateVariable(nc_var_name, nc_var)
else:
result[nc_var_name] = CFCoordinateVariable(nc_var_name, nc_var)
return result
class CFDataVariable(CFVariable):
"""
A CF-netCDF variable containing data pay-load that maps to an Iris :class:`iris.cube.Cube`.
"""
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
raise NotImplementedError
class _CFFormulaTermsVariable(CFVariable):
"""
A CF-netCDF formula terms variable corresponds to a term in a formula that
allows dimensional vertical coordinate values to be computed from dimensionless
vertical coordinate values and associated variables at specific grid points.
Identified by the CF-netCDF variable attribute 'formula_terms'.
Ref: [CF] Section 4.3.2. Dimensional Vertical Coordinate.
[CF] Appendix D. Dimensionless Vertical Coordinates.
"""
cf_identity = 'formula_terms'
def __init__(self, name, data, formula_root, formula_term):
CFVariable.__init__(self, name, data)
# Register the formula root and term relationship.
self.add_formula_term(formula_root, formula_term)
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF formula terms variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for formula terms variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for match_item in _CF_PARSE.finditer(nc_var_att):
match_group = match_item.groupdict()
# Ensure that term name is lower case, as expected.
term_name = match_group['lhs'].lower()
variable_name = match_group['rhs']
if variable_name not in ignore:
if variable_name not in variables:
if warn:
message = 'Missing CF-netCDF formula term variable %r, referenced by netCDF variable %r'
warnings.warn(message % (variable_name, nc_var_name))
else:
if variable_name not in result:
result[variable_name] = _CFFormulaTermsVariable(variable_name,
variables[variable_name],
nc_var_name, term_name)
else:
result[variable_name].add_formula_term(nc_var_name, term_name)
return result
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.cf_name, self.cf_data,
self.cf_terms_by_root)
class CFGridMappingVariable(CFVariable):
"""
A CF-netCDF grid mapping variable contains a list of specific attributes that
define a particular grid mapping. A CF-netCDF grid mapping variable must contain
the attribute 'grid_mapping_name'.
Based on the value of the 'grid_mapping_name' attribute, there are associated
standard names of CF-netCDF coordinate variables that contain the mapping's
independent variables.
Identified by the CF-netCDF variable attribute 'grid_mapping'.
Ref: [CF] Section 5.6. Horizontal Coordinate Reference Systems, Grid Mappings, and Projections.
[CF] Appendix F. Grid Mappings.
"""
cf_identity = 'grid_mapping'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all grid mapping variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for a grid mapping variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF grid mapping variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFGridMappingVariable(name, variables[name])
return result
class CFLabelVariable(CFVariable):
"""
    A CF-netCDF label variable is any netCDF variable that contains string
    textual information, or labels.
Identified by the CF-netCDF variable attribute 'coordinates'.
Also see :class:`iris.fileformats.cf.CFAuxiliaryCoordinateVariable`.
Ref: [CF] Section 6.1. Labels.
"""
cf_identity = 'coordinates'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF label variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for label variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF label variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
# Register variable, but only allow string type.
var = variables[name]
if _is_str_dtype(var):
result[name] = CFLabelVariable(name, var)
return result
def cf_label_data(self, cf_data_var):
"""
Return the associated CF-netCDF label variable strings.
Args:
* cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
The CF-netCDF data variable which the CF-netCDF label variable describes.
Returns:
String labels.
"""
if not isinstance(cf_data_var, CFDataVariable):
raise TypeError('cf_data_var argument should be of type CFDataVariable. Got %r.' % type(cf_data_var))
# Determine the name of the label string (or length) dimension by
# finding the dimension name that doesn't exist within the data dimensions.
str_dim_name = list(set(self.dimensions) - set(cf_data_var.dimensions))
if len(str_dim_name) != 1:
raise ValueError('Invalid string dimensions for CF-netCDF label variable %r' % self.cf_name)
str_dim_name = str_dim_name[0]
label_data = self[:]
if isinstance(label_data, ma.MaskedArray):
label_data = label_data.filled()
# Determine whether we have a string-valued scalar label
# i.e. a character variable that only has one dimension (the length of the string).
if self.ndim == 1:
data = np.array([''.join(label_data).strip()])
else:
# Determine the index of the string dimension.
str_dim = self.dimensions.index(str_dim_name)
# Calculate new label data shape (without string dimension) and create payload array.
new_shape = tuple(dim_len for i, dim_len in enumerate(self.shape) if i != str_dim)
string_basetype = '|S%d' if six.PY2 else '|U%d'
string_dtype = string_basetype % self.shape[str_dim]
data = np.empty(new_shape, dtype=string_dtype)
for index in np.ndindex(new_shape):
# Create the slice for the label data.
if str_dim == 0:
label_index = (slice(None, None),) + index
else:
label_index = index + (slice(None, None),)
label_string = b''.join(label_data[label_index]).strip()
if six.PY3:
label_string = label_string.decode('utf8')
data[index] = label_string
return data
def cf_label_dimensions(self, cf_data_var):
"""
Return the name of the associated CF-netCDF label variable data dimensions.
Args:
* cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
The CF-netCDF data variable which the CF-netCDF label variable describes.
Returns:
Tuple of label data dimension names.
"""
if not isinstance(cf_data_var, CFDataVariable):
raise TypeError('cf_data_var argument should be of type CFDataVariable. Got %r.' % type(cf_data_var))
return tuple([dim_name for dim_name in self.dimensions if dim_name in cf_data_var.dimensions])
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore label string length dimension.
result = set(source[:-1]).issubset(target) or \
set(source[1:]).issubset(target)
return result
class CFMeasureVariable(CFVariable):
"""
A CF-netCDF measure variable is a variable that contains cell areas or volumes.
Identified by the CF-netCDF variable attribute 'cell_measures'.
Ref: [CF] Section 7.2. Cell Measures.
"""
cf_identity = 'cell_measures'
def __init__(self, name, data, measure):
CFVariable.__init__(self, name, data)
#: Associated cell measure of the cell variable
self.cf_measure = measure
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF measure variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for measure variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for match_item in _CF_PARSE.finditer(nc_var_att):
match_group = match_item.groupdict()
measure = match_group['lhs']
variable_name = match_group['rhs']
if variable_name not in ignore:
if variable_name not in variables:
if warn:
message = 'Missing CF-netCDF measure variable %r, referenced by netCDF variable %r'
warnings.warn(message % (variable_name, nc_var_name))
else:
result[variable_name] = CFMeasureVariable(variable_name, variables[variable_name], measure)
return result
################################################################################
class CFGroup(MutableMapping, object):
"""
Represents a collection of 'NetCDF Climate and Forecast (CF) Metadata
Conventions' variables and netCDF global attributes.
"""
def __init__(self):
#: Collection of CF-netCDF variables
self._cf_variables = {}
#: Collection of netCDF global attributes
self.global_attributes = {}
#: Collection of CF-netCDF variables promoted to a CFDataVariable.
self.promoted = {}
def _cf_getter(self, cls):
# Generate dictionary with dictionary comprehension.
return {cf_name: cf_var
for cf_name, cf_var in six.iteritems(self._cf_variables)
if isinstance(cf_var, cls)}
@property
def ancillary_variables(self):
"""Collection of CF-netCDF ancillary variables."""
return self._cf_getter(CFAncillaryDataVariable)
@property
def auxiliary_coordinates(self):
"""Collection of CF-netCDF auxiliary coordinate variables."""
return self._cf_getter(CFAuxiliaryCoordinateVariable)
@property
def bounds(self):
"""Collection of CF-netCDF boundary variables."""
return self._cf_getter(CFBoundaryVariable)
@property
def climatology(self):
"""Collection of CF-netCDF climatology variables."""
return self._cf_getter(CFClimatologyVariable)
@property
def coordinates(self):
"""Collection of CF-netCDF coordinate variables."""
return self._cf_getter(CFCoordinateVariable)
@property
def data_variables(self):
"""Collection of CF-netCDF data pay-load variables."""
return self._cf_getter(CFDataVariable)
@property
def formula_terms(self):
"""Collection of CF-netCDF variables that participate in a CF-netCDF formula term."""
return {cf_name: cf_var
for cf_name, cf_var in six.iteritems(self._cf_variables)
if cf_var.has_formula_terms()}
@property
def grid_mappings(self):
"""Collection of CF-netCDF grid mapping variables."""
return self._cf_getter(CFGridMappingVariable)
@property
def labels(self):
"""Collection of CF-netCDF label variables."""
return self._cf_getter(CFLabelVariable)
@property
def cell_measures(self):
"""Collection of CF-netCDF measure variables."""
return self._cf_getter(CFMeasureVariable)
def keys(self):
"""Return the names of all the CF-netCDF variables in the group."""
return self._cf_variables.keys()
def __len__(self):
return len(self._cf_variables)
def __iter__(self):
for item in self._cf_variables:
yield item
def __setitem__(self, name, variable):
if not isinstance(variable, CFVariable):
raise TypeError('Attempted to add an invalid CF-netCDF variable to the %s' % self.__class__.__name__)
if name != variable.cf_name:
raise ValueError('Mismatch between key name %r and CF-netCDF variable name %r' % (str(name), variable.cf_name))
self._cf_variables[name] = variable
def __getitem__(self, name):
if name not in self._cf_variables:
raise KeyError('Cannot get unknown CF-netCDF variable name %r' % str(name))
return self._cf_variables[name]
def __delitem__(self, name):
if name not in self._cf_variables:
            raise KeyError('Cannot delete unknown CF-netCDF variable name %r' % str(name))
del self._cf_variables[name]
def __repr__(self):
result = []
result.append('variables:%d' % len(self._cf_variables))
result.append('global_attributes:%d' % len(self.global_attributes))
result.append('promoted:%d' % len(self.promoted))
return '<%s of %s>' % (self.__class__.__name__, ', '.join(result))
################################################################################
class CFReader(object):
"""
This class allows the contents of a netCDF file to be interpreted according
to the 'NetCDF Climate and Forecast (CF) Metadata Conventions'.
"""
def __init__(self, filename, warn=False, monotonic=False):
self._filename = os.path.expanduser(filename)
# All CF variable types EXCEPT for the "special cases" of
# CFDataVariable, CFCoordinateVariable and _CFFormulaTermsVariable.
self._variable_types = (CFAncillaryDataVariable, CFAuxiliaryCoordinateVariable,
CFBoundaryVariable, CFClimatologyVariable,
CFGridMappingVariable, CFLabelVariable, CFMeasureVariable)
#: Collection of CF-netCDF variables associated with this netCDF file
self.cf_group = CFGroup()
self._dataset = netCDF4.Dataset(self._filename, mode='r')
# Issue load optimisation warning.
if warn and self._dataset.file_format in ['NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
warnings.warn('Optimise CF-netCDF loading by converting data from NetCDF3 ' \
'to NetCDF4 file format using the "nccopy" command.')
self._check_monotonic = monotonic
self._translate()
self._build_cf_groups()
self._reset()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._filename)
def _translate(self):
"""Classify the netCDF variables into CF-netCDF variables."""
netcdf_variable_names = list(self._dataset.variables.keys())
# Identify all CF coordinate variables first. This must be done
# first as, by CF convention, the definition of a CF auxiliary
# coordinate variable may include a scalar CF coordinate variable,
# whereas we want these two types of variables to be mutually exclusive.
coords = CFCoordinateVariable.identify(self._dataset.variables,
monotonic=self._check_monotonic)
self.cf_group.update(coords)
coordinate_names = list(self.cf_group.coordinates.keys())
# Identify all CF variables EXCEPT for the "special cases".
for variable_type in self._variable_types:
# Prevent grid mapping variables being mis-identified as CF coordinate variables.
ignore = None if issubclass(variable_type, CFGridMappingVariable) else coordinate_names
self.cf_group.update(variable_type.identify(self._dataset.variables, ignore=ignore))
# Identify global netCDF attributes.
attr_dict = {attr_name: _getncattr(self._dataset, attr_name, '') for
attr_name in self._dataset.ncattrs()}
self.cf_group.global_attributes.update(attr_dict)
# Identify and register all CF formula terms.
formula_terms = _CFFormulaTermsVariable.identify(self._dataset.variables)
for cf_var in six.itervalues(formula_terms):
for cf_root, cf_term in six.iteritems(cf_var.cf_terms_by_root):
# Ignore formula terms owned by a bounds variable.
if cf_root not in self.cf_group.bounds:
cf_name = cf_var.cf_name
if cf_var.cf_name not in self.cf_group:
self.cf_group[cf_name] = CFAuxiliaryCoordinateVariable(cf_name, cf_var.cf_data)
self.cf_group[cf_name].add_formula_term(cf_root, cf_term)
# Determine the CF data variables.
data_variable_names = set(netcdf_variable_names) - set(self.cf_group.ancillary_variables) - \
set(self.cf_group.auxiliary_coordinates) - set(self.cf_group.bounds) - \
set(self.cf_group.climatology) - set(self.cf_group.coordinates) - \
set(self.cf_group.grid_mappings) - set(self.cf_group.labels) - \
set(self.cf_group.cell_measures)
for name in data_variable_names:
self.cf_group[name] = CFDataVariable(name, self._dataset.variables[name])
def _build_cf_groups(self):
"""Build the first order relationships between CF-netCDF variables."""
def _build(cf_variable):
coordinate_names = list(self.cf_group.coordinates.keys())
cf_group = CFGroup()
# Build CF variable relationships.
for variable_type in self._variable_types:
# Prevent grid mapping variables being mis-identified as
# CF coordinate variables.
ignore = None if issubclass(variable_type, CFGridMappingVariable) else coordinate_names
match = variable_type.identify(self._dataset.variables, ignore=ignore,
target=cf_variable.cf_name, warn=False)
# Sanity check dimensionality coverage.
for cf_name, cf_var in six.iteritems(match):
if cf_var.spans(cf_variable):
cf_group[cf_name] = self.cf_group[cf_name]
else:
# Register the ignored variable.
# N.B. 'ignored' variable from enclosing scope.
ignored.add(cf_name)
msg = 'Ignoring variable {!r} referenced ' \
'by variable {!r}: Dimensions {!r} do not ' \
'span {!r}'.format(cf_name,
cf_variable.cf_name,
cf_var.dimensions,
cf_variable.dimensions)
warnings.warn(msg)
# Build CF data variable relationships.
if isinstance(cf_variable, CFDataVariable):
# Add global netCDF attributes.
cf_group.global_attributes.update(self.cf_group.global_attributes)
# Add appropriate "dimensioned" CF coordinate variables.
cf_group.update({cf_name: self.cf_group[cf_name] for cf_name
in cf_variable.dimensions if cf_name in
self.cf_group.coordinates})
# Add appropriate "dimensionless" CF coordinate variables.
coordinates_attr = getattr(cf_variable, 'coordinates', '')
cf_group.update({cf_name: self.cf_group[cf_name] for cf_name
in coordinates_attr.split() if cf_name in
self.cf_group.coordinates})
# Add appropriate formula terms.
for cf_var in six.itervalues(self.cf_group.formula_terms):
for cf_root in cf_var.cf_terms_by_root:
if cf_root in cf_group and cf_var.cf_name not in cf_group:
# Sanity check dimensionality.
if cf_var.spans(cf_variable):
cf_group[cf_var.cf_name] = cf_var
else:
# Register the ignored variable.
# N.B. 'ignored' variable from enclosing scope.
ignored.add(cf_var.cf_name)
msg = 'Ignoring formula terms variable {!r} ' \
'referenced by data variable {!r} via ' \
'variable {!r}: Dimensions {!r} do not ' \
'span {!r}'.format(cf_var.cf_name,
cf_variable.cf_name,
cf_root,
cf_var.dimensions,
cf_variable.dimensions)
warnings.warn(msg)
# Add the CF group to the variable.
cf_variable.cf_group = cf_group
# Ignored variables are those that cannot be attached to a
# data variable as the dimensionality of that variable is not
# a subset of the dimensionality of the data variable.
ignored = set()
for cf_variable in six.itervalues(self.cf_group):
_build(cf_variable)
# Determine whether there are any formula terms that
# may be promoted to a CFDataVariable.
if iris.FUTURE.netcdf_promote:
# Restrict promotion to only those formula terms
# that are reference surface/phenomenon.
for cf_var in six.itervalues(self.cf_group.formula_terms):
for cf_root, cf_term in six.iteritems(cf_var.cf_terms_by_root):
cf_root_var = self.cf_group[cf_root]
name = cf_root_var.standard_name or cf_root_var.long_name
terms = reference_terms.get(name, [])
if isinstance(terms, six.string_types) or \
not isinstance(terms, Iterable):
terms = [terms]
cf_var_name = cf_var.cf_name
if cf_term in terms and \
cf_var_name not in self.cf_group.promoted:
data_var = CFDataVariable(cf_var_name, cf_var.cf_data)
self.cf_group.promoted[cf_var_name] = data_var
_build(data_var)
break
# Promote any ignored variables.
promoted = set()
not_promoted = ignored.difference(promoted)
while not_promoted:
cf_name = not_promoted.pop()
if cf_name not in self.cf_group.data_variables and \
cf_name not in self.cf_group.promoted:
data_var = CFDataVariable(cf_name,
self.cf_group[cf_name].cf_data)
self.cf_group.promoted[cf_name] = data_var
_build(data_var)
# Determine whether there are still any ignored variables
# yet to be promoted.
promoted.add(cf_name)
not_promoted = ignored.difference(promoted)
else:
_netcdf_promote_warning()
def _reset(self):
"""Reset the attribute touch history of each variable."""
for nc_var_name in six.iterkeys(self._dataset.variables):
self.cf_group[nc_var_name].cf_attrs_reset()
def __del__(self):
# Explicitly close dataset to prevent file remaining open.
self._dataset.close()
def _getncattr(dataset, attr, default=None):
"""
Simple wrapper round `netCDF4.Dataset.getncattr` to make it behave
more like `getattr`.
"""
try:
value = dataset.getncattr(attr)
except AttributeError:
value = default
return value
def _netcdf_promote_warning():
msg = ('NetCDF default loading behaviour currently does not expose '
'variables which define reference surfaces for dimensionless '
'vertical coordinates as independent Cubes. This behaviour is '
'deprecated in favour of automatic promotion to Cubes. To switch '
'to the new behaviour, set iris.FUTURE.netcdf_promote to True.')
warn_deprecated(msg)
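# Example (hypothetical sketch): classify the contents of a netCDF file.
# Assumes 'example.nc' exists on disk; the filename is illustrative.
#
# cf = CFReader('example.nc')
# print(cf.cf_group)
# for name, var in six.iteritems(cf.cf_group.data_variables):
#     print(name, var.dimensions)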
|
SusanJL/iris
|
lib/iris/fileformats/cf.py
|
Python
|
gpl-3.0
| 44,968
|
[
"NetCDF"
] |
863173195a655154bc78f0c92812eef163a5d1e3e53777d21ef441d3ed577fdf
|
'''
libChEBIpy (c) University of Manchester 2015
libChEBIpy is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
from ._base_object import BaseObject
class Reference(BaseObject):
'''Class representing a ChEBI reference.'''
def __init__(self, reference_id, reference_db_name, location_in_ref=None,
reference_name=None):
self.__reference_id = reference_id
self.__reference_db_name = reference_db_name
self.__location_in_ref = location_in_ref
self.__reference_name = reference_name
BaseObject.__init__(self)
def get_reference_id(self):
'''Returns reference_id'''
return self.__reference_id
    def get_reference_db_name(self):
        '''Returns reference_db_name'''
        return self.__reference_db_name
def get_location_in_ref(self):
'''Returns location_in_ref'''
return self.__location_in_ref
def get_reference_name(self):
'''Returns reference_name'''
return self.__reference_name
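# Example (hypothetical sketch; identifiers are illustrative):
#
# ref = Reference('12345', 'PubMed', reference_name='Some article title')
# print(ref.get_reference_id(), ref.get_reference_db_name())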
|
libChEBI/libChEBIpy
|
libchebipy/_reference.py
|
Python
|
mit
| 1,107
|
[
"VisIt"
] |
3077121cd3464fc736b1f00c6c75f06a2a905e9d23026a4236d65c545d2be5fd
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import os
from StringIO import StringIO
import tempfile
from common import Bag, BaseTest
from test_s3 import destroyBucket
from c7n.resolver import ValuesFrom, URIResolver
class FakeCache(object):
def __init__(self):
self.state = {}
def get(self, key):
return self.state.get(key)
def save(self, key, data):
self.state[key] = data
class FakeResolver(object):
def __init__(self, contents):
self.contents = contents
def resolve(self, uri):
return self.contents
class ResolverTest(BaseTest):
def test_resolve_s3(self):
session_factory = self.replay_flight_data('test_s3_resolver')
session = session_factory()
client = session.client('s3')
resource = session.resource('s3')
bname = 'custodian-byebye'
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
key = resource.Object(bname, 'resource.json')
content = json.dumps({'moose': {'soup': 'duck'}})
key.put(Body=content, ContentLength=len(content),
ContentType='application/json')
cache = FakeCache()
resolver = URIResolver(session_factory, cache)
uri = 's3://%s/resource.json?RequestPayer=requestor' % bname
data = resolver.resolve(uri)
self.assertEqual(content, data)
self.assertEqual(cache.state.keys(), [('uri-resolver', uri)])
def test_resolve_file(self):
content = json.dumps({'universe': {'galaxy': {'system': 'sun'}}})
cache = FakeCache()
resolver = URIResolver(None, cache)
with tempfile.NamedTemporaryFile(dir=os.getcwd()) as fh:
fh.write(content)
fh.flush()
self.assertEqual(
resolver.resolve('file:%s' % fh.name), content)
class UrlValueTest(BaseTest):
def get_values_from(self, data, content):
mgr = Bag({'session_factory': None, '_cache': None})
values = ValuesFrom(data, mgr)
values.resolver = FakeResolver(content)
return values
def test_json_expr(self):
values = self.get_values_from(
{'url': 'moon', 'expr': '[].bean', 'format': 'json'},
json.dumps([{'bean': 'magic'}]))
self.assertEqual(values.get_values(), ['magic'])
def test_invalid_format(self):
values = self.get_values_from({'url': 'mars'}, '')
self.assertRaises(ValueError, values.get_values)
def test_txt(self):
out = StringIO()
for i in ['a', 'b', 'c', 'd']:
out.write('%s\n' % i)
values = self.get_values_from({'url': 'letters.txt'}, out.getvalue())
self.assertEqual(
values.get_values(),
['a', 'b', 'c', 'd'])
def test_csv_expr(self):
out = StringIO()
writer = csv.writer(out)
writer.writerows([range(5) for r in range(5)])
values = self.get_values_from(
{'url': 'sun.csv', 'expr': '[*][2]'}, out.getvalue())
self.assertEqual(values.get_values(), ['2', '2', '2', '2', '2'])
def test_csv_column(self):
out = StringIO()
writer = csv.writer(out)
writer.writerows([range(5) for r in range(5)])
values = self.get_values_from(
{'url': 'sun.csv', 'expr': 1}, out.getvalue())
self.assertEqual(values.get_values(), ['1', '1', '1', '1', '1'])
def test_csv_raw(self):
out = StringIO()
writer = csv.writer(out)
writer.writerows([range(3, 4) for r in range(5)])
values = self.get_values_from({'url': 'sun.csv'}, out.getvalue())
self.assertEqual(
values.get_values(),
[['3'], ['3'], ['3'], ['3'], ['3']])
|
RyanWolfe/cloud-custodian
|
tests/test_resolver.py
|
Python
|
apache-2.0
| 4,328
|
[
"Galaxy",
"MOOSE"
] |
d1d3d8e6f19f18833c1d4a3184902802dbaf29027e2e28c5aef198c9e4d07ab6
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from warnings import warn
from itertools import product
import numpy as np
from skbio.alignment import TabularMSA
from skbio.alignment._ssw_wrapper import StripedSmithWaterman
from skbio.sequence import DNA, RNA, Protein
from skbio.sequence import GrammaredSequence
from skbio.util import EfficiencyWarning
from skbio.util._decorator import experimental, deprecated
# This is temporary: blosum50 does not exist in skbio yet as per
# issue 161. When the issue is resolved, this should be removed in favor
# of an import.
blosum50 = \
{
'*': {'*': 1, 'A': -5, 'C': -5, 'B': -5, 'E': -5, 'D': -5, 'G': -5,
'F': -5, 'I': -5, 'H': -5, 'K': -5, 'M': -5, 'L': -5,
'N': -5, 'Q': -5, 'P': -5, 'S': -5, 'R': -5, 'T': -5,
'W': -5, 'V': -5, 'Y': -5, 'X': -5, 'Z': -5},
'A': {'*': -5, 'A': 5, 'C': -1, 'B': -2, 'E': -1, 'D': -2, 'G': 0,
'F': -3, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -2,
'N': -1, 'Q': -1, 'P': -1, 'S': 1, 'R': -2, 'T': 0, 'W': -3,
'V': 0, 'Y': -2, 'X': -1, 'Z': -1},
'C': {'*': -5, 'A': -1, 'C': 13, 'B': -3, 'E': -3, 'D': -4,
'G': -3, 'F': -2, 'I': -2, 'H': -3, 'K': -3, 'M': -2,
'L': -2, 'N': -2, 'Q': -3, 'P': -4, 'S': -1, 'R': -4,
'T': -1, 'W': -5, 'V': -1, 'Y': -3, 'X': -1, 'Z': -3},
'B': {'*': -5, 'A': -2, 'C': -3, 'B': 6, 'E': 1, 'D': 6, 'G': -1,
'F': -4, 'I': -4, 'H': 0, 'K': 0, 'M': -3, 'L': -4, 'N': 5,
'Q': 0, 'P': -2, 'S': 0, 'R': -1, 'T': 0, 'W': -5, 'V': -3,
'Y': -3, 'X': -1, 'Z': 1},
'E': {'*': -5, 'A': -1, 'C': -3, 'B': 1, 'E': 6, 'D': 2, 'G': -3,
'F': -3, 'I': -4, 'H': 0, 'K': 1, 'M': -2, 'L': -3, 'N': 0,
'Q': 2, 'P': -1, 'S': -1, 'R': 0, 'T': -1, 'W': -3, 'V': -3,
'Y': -2, 'X': -1, 'Z': 5},
'D': {'*': -5, 'A': -2, 'C': -4, 'B': 6, 'E': 2, 'D': 8, 'G': -1,
'F': -5, 'I': -4, 'H': -1, 'K': -1, 'M': -4, 'L': -4, 'N': 2,
'Q': 0, 'P': -1, 'S': 0, 'R': -2, 'T': -1, 'W': -5, 'V': -4,
'Y': -3, 'X': -1, 'Z': 1},
'G': {'*': -5, 'A': 0, 'C': -3, 'B': -1, 'E': -3, 'D': -1, 'G': 8,
'F': -4, 'I': -4, 'H': -2, 'K': -2, 'M': -3, 'L': -4, 'N': 0,
'Q': -2, 'P': -2, 'S': 0, 'R': -3, 'T': -2, 'W': -3, 'V': -4,
'Y': -3, 'X': -1, 'Z': -2},
'F': {'*': -5, 'A': -3, 'C': -2, 'B': -4, 'E': -3, 'D': -5,
'G': -4, 'F': 8, 'I': 0, 'H': -1, 'K': -4, 'M': 0, 'L': 1,
'N': -4, 'Q': -4, 'P': -4, 'S': -3, 'R': -3, 'T': -2, 'W': 1,
'V': -1, 'Y': 4, 'X': -1, 'Z': -4},
'I': {'*': -5, 'A': -1, 'C': -2, 'B': -4, 'E': -4, 'D': -4,
'G': -4, 'F': 0, 'I': 5, 'H': -4, 'K': -3, 'M': 2, 'L': 2,
'N': -3, 'Q': -3, 'P': -3, 'S': -3, 'R': -4, 'T': -1,
'W': -3, 'V': 4, 'Y': -1, 'X': -1, 'Z': -3},
'H': {'*': -5, 'A': -2, 'C': -3, 'B': 0, 'E': 0, 'D': -1, 'G': -2,
'F': -1, 'I': -4, 'H': 10, 'K': 0, 'M': -1, 'L': -3, 'N': 1,
'Q': 1, 'P': -2, 'S': -1, 'R': 0, 'T': -2, 'W': -3, 'V': -4,
'Y': 2, 'X': -1, 'Z': 0},
'K': {'*': -5, 'A': -1, 'C': -3, 'B': 0, 'E': 1, 'D': -1, 'G': -2,
'F': -4, 'I': -3, 'H': 0, 'K': 6, 'M': -2, 'L': -3, 'N': 0,
'Q': 2, 'P': -1, 'S': 0, 'R': 3, 'T': -1, 'W': -3, 'V': -3,
'Y': -2, 'X': -1, 'Z': 1},
'M': {'*': -5, 'A': -1, 'C': -2, 'B': -3, 'E': -2, 'D': -4,
'G': -3, 'F': 0, 'I': 2, 'H': -1, 'K': -2, 'M': 7, 'L': 3,
'N': -2, 'Q': 0, 'P': -3, 'S': -2, 'R': -2, 'T': -1, 'W': -1,
'V': 1, 'Y': 0, 'X': -1, 'Z': -1},
'L': {'*': -5, 'A': -2, 'C': -2, 'B': -4, 'E': -3, 'D': -4,
'G': -4, 'F': 1, 'I': 2, 'H': -3, 'K': -3, 'M': 3, 'L': 5,
'N': -4, 'Q': -2, 'P': -4, 'S': -3, 'R': -3, 'T': -1,
'W': -2, 'V': 1, 'Y': -1, 'X': -1, 'Z': -3},
'N': {'*': -5, 'A': -1, 'C': -2, 'B': 5, 'E': 0, 'D': 2, 'G': 0,
'F': -4, 'I': -3, 'H': 1, 'K': 0, 'M': -2, 'L': -4, 'N': 7,
'Q': 0, 'P': -2, 'S': 1, 'R': -1, 'T': 0, 'W': -4, 'V': -3,
'Y': -2, 'X': -1, 'Z': 0},
'Q': {'*': -5, 'A': -1, 'C': -3, 'B': 0, 'E': 2, 'D': 0, 'G': -2,
'F': -4, 'I': -3, 'H': 1, 'K': 2, 'M': 0, 'L': -2, 'N': 0,
'Q': 7, 'P': -1, 'S': 0, 'R': 1, 'T': -1, 'W': -1, 'V': -3,
'Y': -1, 'X': -1, 'Z': 4},
'P': {'*': -5, 'A': -1, 'C': -4, 'B': -2, 'E': -1, 'D': -1,
'G': -2, 'F': -4, 'I': -3, 'H': -2, 'K': -1, 'M': -3,
'L': -4, 'N': -2, 'Q': -1, 'P': 10, 'S': -1, 'R': -3,
'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': -1},
'S': {'*': -5, 'A': 1, 'C': -1, 'B': 0, 'E': -1, 'D': 0, 'G': 0,
'F': -3, 'I': -3, 'H': -1, 'K': 0, 'M': -2, 'L': -3, 'N': 1,
'Q': 0, 'P': -1, 'S': 5, 'R': -1, 'T': 2, 'W': -4, 'V': -2,
'Y': -2, 'X': -1, 'Z': 0},
'R': {'*': -5, 'A': -2, 'C': -4, 'B': -1, 'E': 0, 'D': -2, 'G': -3,
'F': -3, 'I': -4, 'H': 0, 'K': 3, 'M': -2, 'L': -3, 'N': -1,
'Q': 1, 'P': -3, 'S': -1, 'R': 7, 'T': -1, 'W': -3, 'V': -3,
'Y': -1, 'X': -1, 'Z': 0},
'T': {'*': -5, 'A': 0, 'C': -1, 'B': 0, 'E': -1, 'D': -1, 'G': -2,
'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': 0,
'Q': -1, 'P': -1, 'S': 2, 'R': -1, 'T': 5, 'W': -3, 'V': 0,
'Y': -2, 'X': -1, 'Z': -1},
'W': {'*': -5, 'A': -3, 'C': -5, 'B': -5, 'E': -3, 'D': -5,
'G': -3, 'F': 1, 'I': -3, 'H': -3, 'K': -3, 'M': -1, 'L': -2,
'N': -4, 'Q': -1, 'P': -4, 'S': -4, 'R': -3, 'T': -3,
'W': 15, 'V': -3, 'Y': 2, 'X': -1, 'Z': -2},
'V': {'*': -5, 'A': 0, 'C': -1, 'B': -3, 'E': -3, 'D': -4, 'G': -4,
'F': -1, 'I': 4, 'H': -4, 'K': -3, 'M': 1, 'L': 1, 'N': -3,
'Q': -3, 'P': -3, 'S': -2, 'R': -3, 'T': 0, 'W': -3, 'V': 5,
'Y': -1, 'X': -1, 'Z': -3},
'Y': {'*': -5, 'A': -2, 'C': -3, 'B': -3, 'E': -2, 'D': -3,
'G': -3, 'F': 4, 'I': -1, 'H': 2, 'K': -2, 'M': 0, 'L': -1,
'N': -2, 'Q': -1, 'P': -3, 'S': -2, 'R': -1, 'T': -2, 'W': 2,
'V': -1, 'Y': 8, 'X': -1, 'Z': -2},
'X': {'*': -5, 'A': -1, 'C': -1, 'B': -1, 'E': -1, 'D': -1,
'G': -1, 'F': -1, 'I': -1, 'H': -1, 'K': -1, 'M': -1,
'L': -1, 'N': -1, 'Q': -1, 'P': -1, 'S': -1, 'R': -1,
'T': -1, 'W': -1, 'V': -1, 'Y': -1, 'X': -1, 'Z': -1},
'Z': {'*': -5, 'A': -1, 'C': -3, 'B': 1, 'E': 5, 'D': 1, 'G': -2,
'F': -4, 'I': -3, 'H': 0, 'K': 1, 'M': -1, 'L': -3, 'N': 0,
'Q': 4, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -2, 'V': -3,
'Y': -2, 'X': -1, 'Z': 5}}
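# A quick sanity check on the matrix above (values read directly from the
# dict): BLOSUM matrices are symmetric, e.g.
#
#   blosum50['A']['A']                         # -> 5 (match)
#   blosum50['A']['C']                         # -> -1 (mismatch)
#   blosum50['A']['C'] == blosum50['C']['A']   # -> True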
@experimental(as_of="0.4.0")
def local_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=2, mismatch_score=-3,
substitution_matrix=None):
"""Locally align exactly two nucleotide seqs with Smith-Waterman
Parameters
----------
seq1 : DNA or RNA
The first unaligned sequence.
seq2 : DNA or RNA
The second unaligned sequence.
gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from the previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from the previous best
alignment score, so is typically positive).
match_score : int or float, optional
The score to add for a match between a pair of bases (this is added
to the previous best alignment score, so is typically positive).
mismatch_score : int or float, optional
The score to add for a mismatch between a pair of bases (this is
added to the previous best alignment score, so is typically
negative).
    substitution_matrix : 2D dict (or similar), optional
Lookup for substitution scores (these values are added to the
previous best alignment score). If provided, this overrides
``match_score`` and ``mismatch_score``.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
    global_pairwise_align_nucleotide
Notes
-----
Default ``match_score``, ``mismatch_score``, ``gap_open_penalty`` and
``gap_extend_penalty`` parameters are derived from the NCBI BLAST
Server [1]_.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
"""
for seq in seq1, seq2:
if not isinstance(seq, (DNA, RNA)):
raise TypeError(
"`seq1` and `seq2` must be DNA or RNA, not type %r"
% type(seq).__name__)
# use the substitution matrix provided by the user, or compute from
# match_score and mismatch_score if a substitution matrix was not provided
if substitution_matrix is None:
substitution_matrix = \
make_identity_substitution_matrix(match_score, mismatch_score)
return local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
@experimental(as_of="0.4.0")
def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None):
"""Locally align exactly two protein seqs with Smith-Waterman
Parameters
----------
seq1 : Protein
The first unaligned sequence.
seq2 : Protein
The second unaligned sequence.
gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from the previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from the previous best
alignment score, so is typically positive).
    substitution_matrix : 2D dict (or similar), optional
Lookup for substitution scores (these values are added to the
previous best alignment score); default is BLOSUM 50.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
    global_pairwise_align_nucleotide
Notes
-----
Default ``gap_open_penalty`` and ``gap_extend_penalty`` parameters are
derived from the NCBI BLAST Server [1]_.
The BLOSUM (blocks substitution matrices) amino acid substitution matrices
were originally defined in [2]_.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
.. [2] Amino acid substitution matrices from protein blocks.
S Henikoff and J G Henikoff.
Proc Natl Acad Sci U S A. Nov 15, 1992; 89(22): 10915-10919.
"""
for seq in seq1, seq2:
if not isinstance(seq, Protein):
raise TypeError(
"`seq1` and `seq2` must be Protein, not type %r"
% type(seq).__name__)
if substitution_matrix is None:
substitution_matrix = blosum50
return local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
@experimental(as_of="0.4.0")
def local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix):
"""Locally align exactly two seqs with Smith-Waterman
Parameters
----------
seq1 : GrammaredSequence
The first unaligned sequence.
seq2 : GrammaredSequence
The second unaligned sequence.
gap_open_penalty : int or float
        Penalty for opening a gap (this is subtracted from the previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float
        Penalty for extending a gap (this is subtracted from the previous best
alignment score, so is typically positive).
    substitution_matrix : 2D dict (or similar)
Lookup for substitution scores (these values are added to the
previous best alignment score).
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
    global_pairwise_align_nucleotide
Notes
-----
This algorithm was originally described in [1]_. The scikit-bio
implementation was validated against the EMBOSS water web server [2]_.
References
----------
.. [1] Identification of common molecular subsequences.
Smith TF, Waterman MS.
J Mol Biol. 1981 Mar 25;147(1):195-7.
.. [2] http://www.ebi.ac.uk/Tools/psa/emboss_water/
"""
warn("You're using skbio's python implementation of Smith-Waterman "
"alignment. This will be very slow (e.g., thousands of times slower) "
"than skbio.alignment.local_pairwise_align_ssw.",
EfficiencyWarning)
for seq in seq1, seq2:
if not isinstance(seq, GrammaredSequence):
raise TypeError(
"`seq1` and `seq2` must be %r subclasses, not type %r" %
(GrammaredSequence.__name__, type(seq).__name__))
if type(seq1) is not type(seq2):
raise TypeError(
"`seq1` and `seq2` must be the same type: %r != %r"
% (type(seq1).__name__, type(seq2).__name__))
seq1 = _coerce_alignment_input_type(seq1)
seq2 = _coerce_alignment_input_type(seq2)
score_matrix, traceback_matrix = _compute_score_and_traceback_matrices(
seq1, seq2, gap_open_penalty, gap_extend_penalty,
substitution_matrix, new_alignment_score=0.0,
init_matrices_f=_init_matrices_sw)
end_row_position, end_col_position =\
np.unravel_index(np.argmax(score_matrix), score_matrix.shape)
aligned1, aligned2, score, seq1_start_position, seq2_start_position = \
_traceback(traceback_matrix, score_matrix, seq1, seq2,
end_row_position, end_col_position)
start_end_positions = [(seq1_start_position, end_col_position-1),
(seq2_start_position, end_row_position-1)]
msa = TabularMSA(aligned1 + aligned2)
return msa, score, start_end_positions
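# An illustrative usage sketch (not part of the library; the sequences are
# made up). It relies only on functions defined in this module:
#
#   >>> from skbio import DNA
#   >>> m = make_identity_substitution_matrix(2, -3)
#   >>> msa, score, start_end = local_pairwise_align(
#   ...     DNA("GATTACA"), DNA("ATTAC"),
#   ...     gap_open_penalty=5, gap_extend_penalty=2, substitution_matrix=m)
#
# ``msa`` holds the two aligned sequences, and ``start_end`` gives the
# (start, end) indexes of the aligned region within each unaligned input.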
@experimental(as_of="0.4.0")
def global_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=1, mismatch_score=-2,
substitution_matrix=None,
penalize_terminal_gaps=False):
"""Globally align nucleotide seqs or alignments with Needleman-Wunsch
Parameters
----------
seq1 : DNA, RNA, or TabularMSA[DNA|RNA]
The first unaligned sequence(s).
seq2 : DNA, RNA, or TabularMSA[DNA|RNA]
The second unaligned sequence(s).
gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from the previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from the previous best
alignment score, so is typically positive).
match_score : int or float, optional
The score to add for a match between a pair of bases (this is added
to the previous best alignment score, so is typically positive).
mismatch_score : int or float, optional
The score to add for a mismatch between a pair of bases (this is
added to the previous best alignment score, so is typically
negative).
    substitution_matrix : 2D dict (or similar), optional
Lookup for substitution scores (these values are added to the
previous best alignment score). If provided, this overrides
``match_score`` and ``mismatch_score``.
    penalize_terminal_gaps : bool, optional
If True, will continue to penalize gaps even after one sequence has
been aligned through its end. This behavior is true Needleman-Wunsch
alignment, but results in (biologically irrelevant) artifacts when
the sequences being aligned are of different length. This is ``False``
by default, which is very likely to be the behavior you want in all or
nearly all cases.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
Notes
-----
Default ``match_score``, ``mismatch_score``, ``gap_open_penalty`` and
``gap_extend_penalty`` parameters are derived from the NCBI BLAST
Server [1]_.
    This function can be used to align either a pair of sequences, a pair of
alignments, or a sequence and an alignment.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
"""
for seq in seq1, seq2:
if not isinstance(seq, (DNA, RNA, TabularMSA)):
raise TypeError(
"`seq1` and `seq2` must be DNA, RNA, or TabularMSA, not type "
"%r" % type(seq).__name__)
if isinstance(seq, TabularMSA) and not issubclass(seq.dtype,
(DNA, RNA)):
raise TypeError(
"`seq1` and `seq2` must be TabularMSA with DNA or RNA dtype, "
"not dtype %r" % seq.dtype.__name__)
# use the substitution matrix provided by the user, or compute from
# match_score and mismatch_score if a substitution matrix was not provided
if substitution_matrix is None:
substitution_matrix = \
make_identity_substitution_matrix(match_score, mismatch_score)
return global_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix,
penalize_terminal_gaps=penalize_terminal_gaps)
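# Illustrative sketch (hypothetical sequences): with the default
# ``penalize_terminal_gaps=False``, globally aligning a short fragment to a
# longer sequence does not penalize the overhanging ends of the longer one:
#
#   >>> from skbio import DNA
#   >>> msa, score, start_end = global_pairwise_align_nucleotide(
#   ...     DNA("ACGT"), DNA("TTACGTTT"))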
@experimental(as_of="0.4.0")
def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None,
penalize_terminal_gaps=False):
"""Globally align pair of protein seqs or alignments with Needleman-Wunsch
Parameters
----------
seq1 : Protein or TabularMSA[Protein]
The first unaligned sequence(s).
seq2 : Protein or TabularMSA[Protein]
The second unaligned sequence(s).
gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from the previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from the previous best
alignment score, so is typically positive).
    substitution_matrix : 2D dict (or similar), optional
Lookup for substitution scores (these values are added to the
previous best alignment score); default is BLOSUM 50.
    penalize_terminal_gaps : bool, optional
If True, will continue to penalize gaps even after one sequence has
been aligned through its end. This behavior is true Needleman-Wunsch
alignment, but results in (biologically irrelevant) artifacts when
the sequences being aligned are of different length. This is ``False``
by default, which is very likely to be the behavior you want in all or
nearly all cases.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
    global_pairwise_align_nucleotide
Notes
-----
Default ``gap_open_penalty`` and ``gap_extend_penalty`` parameters are
derived from the NCBI BLAST Server [1]_.
The BLOSUM (blocks substitution matrices) amino acid substitution matrices
were originally defined in [2]_.
    This function can be used to align either a pair of sequences, a pair of
alignments, or a sequence and an alignment.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
.. [2] Amino acid substitution matrices from protein blocks.
S Henikoff and J G Henikoff.
Proc Natl Acad Sci U S A. Nov 15, 1992; 89(22): 10915-10919.
"""
for seq in seq1, seq2:
if not isinstance(seq, (Protein, TabularMSA)):
raise TypeError(
"`seq1` and `seq2` must be Protein or TabularMSA, not type %r"
% type(seq).__name__)
if isinstance(seq, TabularMSA) and not issubclass(seq.dtype, Protein):
raise TypeError(
"`seq1` and `seq2` must be TabularMSA with Protein dtype, "
"not dtype %r" % seq.dtype.__name__)
if substitution_matrix is None:
substitution_matrix = blosum50
return global_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix,
penalize_terminal_gaps=penalize_terminal_gaps)
@experimental(as_of="0.4.0")
def global_pairwise_align(seq1, seq2, gap_open_penalty, gap_extend_penalty,
substitution_matrix, penalize_terminal_gaps=False):
"""Globally align a pair of seqs or alignments with Needleman-Wunsch
Parameters
----------
seq1 : GrammaredSequence or TabularMSA
The first unaligned sequence(s).
seq2 : GrammaredSequence or TabularMSA
The second unaligned sequence(s).
gap_open_penalty : int or float
        Penalty for opening a gap (this is subtracted from the previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float
        Penalty for extending a gap (this is subtracted from the previous best
alignment score, so is typically positive).
    substitution_matrix : 2D dict (or similar)
Lookup for substitution scores (these values are added to the
previous best alignment score).
    penalize_terminal_gaps : bool, optional
If True, will continue to penalize gaps even after one sequence has
been aligned through its end. This behavior is true Needleman-Wunsch
alignment, but results in (biologically irrelevant) artifacts when
the sequences being aligned are of different length. This is ``False``
by default, which is very likely to be the behavior you want in all or
nearly all cases.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align_protein
    global_pairwise_align_nucleotide
Notes
-----
This algorithm (in a slightly more basic form) was originally described
in [1]_. The scikit-bio implementation was validated against the
EMBOSS needle web server [2]_.
    This function can be used to align either a pair of sequences, a pair of
alignments, or a sequence and an alignment.
References
----------
.. [1] A general method applicable to the search for similarities in
the amino acid sequence of two proteins.
Needleman SB, Wunsch CD.
J Mol Biol. 1970 Mar;48(3):443-53.
.. [2] http://www.ebi.ac.uk/Tools/psa/emboss_needle/
"""
warn("You're using skbio's python implementation of Needleman-Wunsch "
"alignment. This is known to be very slow (e.g., thousands of times "
"slower than a native C implementation). We'll be adding a faster "
"version soon (see https://github.com/biocore/scikit-bio/issues/254 "
"to track progress on this).", EfficiencyWarning)
for seq in seq1, seq2:
# We don't need to check the case where `seq` is a `TabularMSA` with a
        # dtype that isn't a subclass of `GrammaredSequence`; this is
# guaranteed by `TabularMSA`.
if not isinstance(seq, (GrammaredSequence, TabularMSA)):
raise TypeError(
"`seq1` and `seq2` must be GrammaredSequence subclasses or "
"TabularMSA, not type %r" % type(seq).__name__)
seq1 = _coerce_alignment_input_type(seq1)
seq2 = _coerce_alignment_input_type(seq2)
if seq1.dtype is not seq2.dtype:
raise TypeError(
"`seq1` and `seq2` must have the same dtype: %r != %r"
% (seq1.dtype.__name__, seq2.dtype.__name__))
if penalize_terminal_gaps:
init_matrices_f = _init_matrices_nw
else:
init_matrices_f = _init_matrices_nw_no_terminal_gap_penalty
score_matrix, traceback_matrix = \
_compute_score_and_traceback_matrices(
seq1, seq2, gap_open_penalty, gap_extend_penalty,
substitution_matrix, new_alignment_score=-np.inf,
init_matrices_f=init_matrices_f,
penalize_terminal_gaps=penalize_terminal_gaps)
end_row_position = traceback_matrix.shape[0] - 1
end_col_position = traceback_matrix.shape[1] - 1
aligned1, aligned2, score, seq1_start_position, seq2_start_position = \
_traceback(traceback_matrix, score_matrix, seq1, seq2,
end_row_position, end_col_position)
start_end_positions = [(seq1_start_position, end_col_position-1),
(seq2_start_position, end_row_position-1)]
msa = TabularMSA(aligned1 + aligned2)
return msa, score, start_end_positions
@experimental(as_of="0.4.0")
def local_pairwise_align_ssw(sequence1, sequence2, **kwargs):
"""Align query and target sequences with Striped Smith-Waterman.
Parameters
----------
sequence1 : DNA, RNA, or Protein
The first unaligned sequence
sequence2 : DNA, RNA, or Protein
The second unaligned sequence
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
Notes
-----
This is a wrapper for the SSW package [1]_.
For a complete list of optional keyword-arguments that can be provided,
see ``skbio.alignment.StripedSmithWaterman``.
The following kwargs will not have any effect: `suppress_sequences`,
    `zero_index`, and `protein`.
If an alignment does not meet a provided filter, `None` will be returned.
References
----------
.. [1] Zhao, Mengyao, Wan-Ping Lee, Erik P. Garrison, & Gabor T.
Marth. "SSW Library: An SIMD Smith-Waterman C/C++ Library for
Applications". PLOS ONE (2013). Web. 11 July 2014.
http://www.plosone.org/article/info:doi/10.1371/journal.pone.0082138
See Also
--------
skbio.alignment.StripedSmithWaterman
"""
for seq in sequence1, sequence2:
if not isinstance(seq, (DNA, RNA, Protein)):
raise TypeError(
"`sequence1` and `sequence2` must be DNA, RNA, or Protein, "
"not type %r" % type(seq).__name__)
if type(sequence1) is not type(sequence2):
raise TypeError(
"`sequence1` and `sequence2` must be the same type: %r != %r"
% (type(sequence1).__name__, type(sequence2).__name__))
# We need the sequences for `TabularMSA` to make sense, so don't let the
# user suppress them.
kwargs['suppress_sequences'] = False
kwargs['zero_index'] = True
kwargs['protein'] = False
if isinstance(sequence1, Protein):
kwargs['protein'] = True
query = StripedSmithWaterman(str(sequence1), **kwargs)
alignment = query(str(sequence2))
# If there is no cigar, then it has failed a filter. Return None.
if not alignment.cigar:
return None
start_end = None
if alignment.query_begin != -1:
start_end = [
(alignment.query_begin, alignment.query_end),
(alignment.target_begin, alignment.target_end_optimal)
]
metadata1 = metadata2 = None
if sequence1.has_metadata():
metadata1 = sequence1.metadata
if sequence2.has_metadata():
metadata2 = sequence2.metadata
constructor = type(sequence1)
msa = TabularMSA([
constructor(alignment.aligned_query_sequence, metadata=metadata1,
validate=False),
constructor(alignment.aligned_target_sequence, metadata=metadata2,
validate=False)
])
return msa, alignment.optimal_alignment_score, start_end
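# Illustrative usage sketch (hypothetical sequences):
#
#   >>> from skbio import DNA
#   >>> result = local_pairwise_align_ssw(DNA("ACTAAGGCTCTCTACCCCTCTCAGAGA"),
#   ...                                   DNA("ACTAAGGCTCCTAACCCCCTTTTCTCAGA"))
#   >>> msa, score, start_end = result  # assuming no filter caused None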
@deprecated(as_of="0.4.0", until="0.5.1",
reason="Will be replaced by a SubstitutionMatrix class. To track "
"progress, see [#161]"
"(https://github.com/biocore/scikit-bio/issues/161).")
def make_identity_substitution_matrix(match_score, mismatch_score,
alphabet='ACGTU'):
"""Generate substitution matrix where all matches are scored equally
Parameters
----------
match_score : int, float
The score that should be assigned for all matches. This value is
typically positive.
mismatch_score : int, float
The score that should be assigned for all mismatches. This value is
typically negative.
alphabet : iterable of str, optional
The characters that should be included in the substitution matrix.
Returns
-------
dict of dicts
All characters in alphabet are keys in both dictionaries, so that any
pair of characters can be looked up to get their match or mismatch
score.
"""
result = {}
for c1 in alphabet:
row = {}
for c2 in alphabet:
if c1 == c2:
row[c2] = match_score
else:
row[c2] = mismatch_score
result[c1] = row
return result
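# For example (derived directly from the loops above; key order may vary):
#
#   >>> make_identity_substitution_matrix(1, -1, alphabet='AC')
#   {'A': {'A': 1, 'C': -1}, 'C': {'A': -1, 'C': 1}}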
# Functions from here allow for generalized (global or local) alignment. I
# will likely want to put these in a single object to make the naming a little
# less clunky.
def _coerce_alignment_input_type(seq):
if isinstance(seq, GrammaredSequence):
return TabularMSA([seq])
else:
return seq
_traceback_encoding = {'match': 1, 'vertical-gap': 2, 'horizontal-gap': 3,
'uninitialized': -1, 'alignment-end': 0}
def _init_matrices_sw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
shape = (aln2.shape.position+1, aln1.shape.position+1)
score_matrix = np.zeros(shape)
    traceback_matrix = np.zeros(shape, dtype=int)
traceback_matrix += _traceback_encoding['uninitialized']
traceback_matrix[0, :] = _traceback_encoding['alignment-end']
traceback_matrix[:, 0] = _traceback_encoding['alignment-end']
return score_matrix, traceback_matrix
def _init_matrices_nw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
shape = (aln2.shape.position+1, aln1.shape.position+1)
score_matrix = np.zeros(shape)
    traceback_matrix = np.zeros(shape, dtype=int)
traceback_matrix += _traceback_encoding['uninitialized']
traceback_matrix[0, 0] = _traceback_encoding['alignment-end']
# cache some values for quicker access
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
for i in range(1, shape[0]):
score_matrix[i, 0] = -gap_open_penalty - ((i-1) * gap_extend_penalty)
traceback_matrix[i, 0] = vgap
for i in range(1, shape[1]):
score_matrix[0, i] = -gap_open_penalty - ((i-1) * gap_extend_penalty)
traceback_matrix[0, i] = hgap
return score_matrix, traceback_matrix
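# Worked example of the affine initialization above: with gap_open_penalty=5
# and gap_extend_penalty=2, the first row and column of the score matrix are
# 0, -5, -7, -9, ...: opening a gap costs 5, and each extension adds 2 more.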
def _init_matrices_nw_no_terminal_gap_penalty(
aln1, aln2, gap_open_penalty, gap_extend_penalty):
shape = (aln2.shape.position+1, aln1.shape.position+1)
score_matrix = np.zeros(shape)
    traceback_matrix = np.zeros(shape, dtype=int)
traceback_matrix += _traceback_encoding['uninitialized']
traceback_matrix[0, 0] = _traceback_encoding['alignment-end']
# cache some values for quicker access
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
for i in range(1, shape[0]):
traceback_matrix[i, 0] = vgap
for i in range(1, shape[1]):
traceback_matrix[0, i] = hgap
return score_matrix, traceback_matrix
def _compute_substitution_score(aln1_chars, aln2_chars, substitution_matrix,
gap_substitution_score, gap_chars):
substitution_score = 0
for aln1_char, aln2_char in product(aln1_chars, aln2_chars):
if aln1_char in gap_chars or aln2_char in gap_chars:
substitution_score += gap_substitution_score
else:
try:
substitution_score += \
substitution_matrix[aln1_char][aln2_char]
except KeyError:
offending_chars = \
[c for c in (aln1_char, aln2_char)
if c not in substitution_matrix]
raise ValueError(
"One of the sequences contains a character that is "
"not contained in the substitution matrix. Are you "
"using an appropriate substitution matrix for your "
"sequence type (e.g., a nucleotide substitution "
"matrix does not make sense for aligning protein "
"sequences)? Does your sequence contain invalid "
"characters? The offending character(s) is: "
" %s." % ', '.join(offending_chars))
substitution_score /= (len(aln1_chars) * len(aln2_chars))
return substitution_score
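# Worked example of the averaging above: scoring alignment column 'AC' against
# the single character 'A' with an identity matrix (match=2, mismatch=-3) sums
# the pairs (A, A) -> 2 and (C, A) -> -3, then divides by 2 * 1, giving
# (2 + -3) / 2 = -0.5.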
def _compute_score_and_traceback_matrices(
aln1, aln2, gap_open_penalty, gap_extend_penalty, substitution_matrix,
new_alignment_score=-np.inf, init_matrices_f=_init_matrices_nw,
penalize_terminal_gaps=True, gap_substitution_score=0):
"""Return dynamic programming (score) and traceback matrices.
A note on the ``penalize_terminal_gaps`` parameter. When this value is
``False``, this function is no longer true Smith-Waterman/Needleman-Wunsch
scoring, but when ``True`` it can result in biologically irrelevant
artifacts in Needleman-Wunsch (global) alignments. Specifically, if one
sequence is longer than the other (e.g., if aligning a primer sequence to
an amplification product, or searching for a gene in a genome) the shorter
sequence will have a long gap inserted. The parameter is ``True`` by
default (so that this function computes the score and traceback matrices as
described by the original authors) but the global alignment wrappers pass
``False`` by default, so that the global alignment API returns the result
that users are most likely to be looking for.
"""
aln1_length = aln1.shape.position
aln2_length = aln2.shape.position
# cache some values for quicker/simpler access
aend = _traceback_encoding['alignment-end']
match = _traceback_encoding['match']
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
new_alignment_score = (new_alignment_score, aend)
# Initialize a matrix to use for scoring the alignment and for tracing
# back the best alignment
score_matrix, traceback_matrix = init_matrices_f(
aln1, aln2, gap_open_penalty, gap_extend_penalty)
# Iterate over the characters in aln2 (which corresponds to the vertical
# sequence in the matrix)
for aln2_pos, aln2_chars in enumerate(aln2.iter_positions(
ignore_metadata=True), 1):
aln2_chars = str(aln2_chars)
# Iterate over the characters in aln1 (which corresponds to the
# horizontal sequence in the matrix)
for aln1_pos, aln1_chars in enumerate(aln1.iter_positions(
ignore_metadata=True), 1):
aln1_chars = str(aln1_chars)
# compute the score for a match/mismatch
substitution_score = _compute_substitution_score(
aln1_chars, aln2_chars, substitution_matrix,
gap_substitution_score, aln1.dtype.gap_chars)
diag_score = \
(score_matrix[aln2_pos-1, aln1_pos-1] + substitution_score,
match)
# compute the score for adding a gap in aln2 (vertical)
if not penalize_terminal_gaps and (aln1_pos == aln1_length):
# we've reached the end of aln1, so adding vertical gaps
# (which become gaps in aln1) should no longer
# be penalized (if penalize_terminal_gaps == False)
up_score = (score_matrix[aln2_pos-1, aln1_pos], vgap)
elif traceback_matrix[aln2_pos-1, aln1_pos] == vgap:
# gap extend, because the cell above was also a gap
up_score = \
(score_matrix[aln2_pos-1, aln1_pos] - gap_extend_penalty,
vgap)
else:
# gap open, because the cell above was not a gap
up_score = \
(score_matrix[aln2_pos-1, aln1_pos] - gap_open_penalty,
vgap)
# compute the score for adding a gap in aln1 (horizontal)
if not penalize_terminal_gaps and (aln2_pos == aln2_length):
# we've reached the end of aln2, so adding horizontal gaps
# (which become gaps in aln2) should no longer
# be penalized (if penalize_terminal_gaps == False)
left_score = (score_matrix[aln2_pos, aln1_pos-1], hgap)
elif traceback_matrix[aln2_pos, aln1_pos-1] == hgap:
# gap extend, because the cell to the left was also a gap
left_score = \
(score_matrix[aln2_pos, aln1_pos-1] - gap_extend_penalty,
hgap)
else:
# gap open, because the cell to the left was not a gap
left_score = \
(score_matrix[aln2_pos, aln1_pos-1] - gap_open_penalty,
hgap)
# identify the largest score, and use that information to populate
# the score and traceback matrices
best_score = _first_largest([new_alignment_score, left_score,
diag_score, up_score])
score_matrix[aln2_pos, aln1_pos] = best_score[0]
traceback_matrix[aln2_pos, aln1_pos] = best_score[1]
return score_matrix, traceback_matrix
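# In short, each cell of the score matrix applies the affine-gap recurrence:
#
#   S[i, j] = max(new_alignment_score,                # start a new alignment
#                 S[i-1, j-1] + substitution_score,   # match/mismatch
#                 S[i-1, j] - gap penalty,            # vertical gap
#                 S[i, j-1] - gap penalty)            # horizontal gap
#
# where the gap penalty is gap_open_penalty or gap_extend_penalty depending on
# whether the neighboring cell already ends in a gap of the same direction.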
def _traceback(traceback_matrix, score_matrix, aln1, aln2, start_row,
start_col):
# cache some values for simpler reference
aend = _traceback_encoding['alignment-end']
match = _traceback_encoding['match']
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
gap_character = aln1.dtype.default_gap_char
# initialize the result alignments
aln1_sequence_count = aln1.shape.sequence
aligned_seqs1 = [[] for e in range(aln1_sequence_count)]
aln2_sequence_count = aln2.shape.sequence
aligned_seqs2 = [[] for e in range(aln2_sequence_count)]
current_row = start_row
current_col = start_col
best_score = score_matrix[current_row, current_col]
current_value = None
while current_value != aend:
current_value = traceback_matrix[current_row, current_col]
if current_value == match:
for aligned_seq, input_seq in zip(aligned_seqs1, aln1):
aligned_seq.append(str(input_seq[current_col-1]))
for aligned_seq, input_seq in zip(aligned_seqs2, aln2):
aligned_seq.append(str(input_seq[current_row-1]))
current_row -= 1
current_col -= 1
elif current_value == vgap:
for aligned_seq in aligned_seqs1:
aligned_seq.append(gap_character)
for aligned_seq, input_seq in zip(aligned_seqs2, aln2):
aligned_seq.append(str(input_seq[current_row-1]))
current_row -= 1
elif current_value == hgap:
for aligned_seq, input_seq in zip(aligned_seqs1, aln1):
aligned_seq.append(str(input_seq[current_col-1]))
for aligned_seq in aligned_seqs2:
aligned_seq.append(gap_character)
current_col -= 1
elif current_value == aend:
continue
else:
raise ValueError(
"Invalid value in traceback matrix: %s" % current_value)
for i, (aligned_seq, original) in enumerate(zip(aligned_seqs1, aln1)):
aligned_seq = ''.join(aligned_seq)[::-1]
constructor = aln1.dtype
metadata = None
if original.has_metadata():
metadata = original.metadata
aligned_seqs1[i] = constructor(aligned_seq, metadata=metadata,
validate=False)
for i, (aligned_seq, original) in enumerate(zip(aligned_seqs2, aln2)):
aligned_seq = ''.join(aligned_seq)[::-1]
constructor = aln2.dtype
metadata = None
if original.has_metadata():
metadata = original.metadata
aligned_seqs2[i] = constructor(aligned_seq, metadata=metadata,
validate=False)
return aligned_seqs1, aligned_seqs2, best_score, current_col, current_row
def _first_largest(scores):
""" Similar to max, but returns the first element achieving the high score
    If ``max`` receives tuples, it will break a tie for the highest value
    of entry[i] using entry[i+1]. We don't want that here - to better match
with the results of other tools, we want to be able to define which
entry is returned in the case of a tie.
"""
result = scores[0]
for score, direction in scores[1:]:
if score > result[0]:
result = (score, direction)
return result
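# For example, _first_largest([(5, 0), (5, 3)]) returns (5, 0), whereas
# max([(5, 0), (5, 3)]) would break the tie on the second tuple element and
# return (5, 3).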
|
kdmurray91/scikit-bio
|
skbio/alignment/_pairwise.py
|
Python
|
bsd-3-clause
| 44,605
|
[
"BLAST",
"scikit-bio"
] |
b3005859fb6c543bea05b33c237b4dd042b0846a086353062c935e6678f8c89b
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
import os
import logging
import chardet
import codecs
import re
from openlp.core.lib import translate
from openlp.core.utils import AppLocation
from openlp.plugins.bibles.lib.db import BibleDB, BiblesResourcesDB
log = logging.getLogger(__name__)
def replacement(match):
return match.group(2).upper()
class OSISBible(BibleDB):
"""
`OSIS <http://www.bibletechnologies.net/>`_ Bible format importer class.
"""
log.info('BibleOSISImpl loaded')
def __init__(self, parent, **kwargs):
log.debug(self.__class__.__name__)
BibleDB.__init__(self, parent, **kwargs)
self.filename = kwargs['filename']
self.language_regex = re.compile(r'<language.*>(.*?)</language>')
self.verse_regex = re.compile(r'<verse osisID="([a-zA-Z0-9 ]*).([0-9]*).([0-9]*)">(.*?)</verse>')
self.note_regex = re.compile(r'<note(.*?)>(.*?)</note>')
self.title_regex = re.compile(r'<title(.*?)>(.*?)</title>')
self.milestone_regex = re.compile(r'<milestone(.*?)/>')
self.fi_regex = re.compile(r'<FI>(.*?)<Fi>')
self.rf_regex = re.compile(r'<RF>(.*?)<Rf>')
self.lb_regex = re.compile(r'<lb(.*?)>')
self.lg_regex = re.compile(r'<lg(.*?)>')
self.l_regex = re.compile(r'<l (.*?)>')
self.w_regex = re.compile(r'<w (.*?)>')
self.q_regex = re.compile(r'<q(.*?)>')
self.q1_regex = re.compile(r'<q(.*?)level="1"(.*?)>')
self.q2_regex = re.compile(r'<q(.*?)level="2"(.*?)>')
self.trans_regex = re.compile(r'<transChange(.*?)>(.*?)</transChange>')
self.divine_name_regex = re.compile(r'<divineName(.*?)>(.*?)</divineName>')
self.spaces_regex = re.compile(r'([ ]{2,})')
filepath = os.path.join(
AppLocation.get_directory(AppLocation.PluginsDir), 'bibles', 'resources', 'osisbooks.csv')
def do_import(self, bible_name=None):
"""
Loads a Bible from file.
"""
log.debug('Starting OSIS import from "%s"' % self.filename)
detect_file = None
db_book = None
osis = None
success = True
last_chapter = 0
match_count = 0
self.wizard.increment_progress_bar(translate('BiblesPlugin.OsisImport',
'Detecting encoding (this may take a few minutes)...'))
try:
detect_file = open(self.filename, 'r')
details = chardet.detect(detect_file.read(1048576))
detect_file.seek(0)
lines_in_file = int(len(detect_file.readlines()))
except IOError:
log.exception('Failed to detect OSIS file encoding')
return
finally:
if detect_file:
detect_file.close()
try:
osis = codecs.open(self.filename, 'r', details['encoding'])
repl = replacement
language_id = False
            # Decide whether the bible probably contains only the NT, the OT
            # and NT, or the OT, NT and Apocrypha
if lines_in_file < 11500:
book_count = 27
chapter_count = 260
elif lines_in_file < 34200:
book_count = 66
chapter_count = 1188
else:
book_count = 67
chapter_count = 1336
for file_record in osis:
if self.stop_import_flag:
break
# Try to find the bible language
if not language_id:
language_match = self.language_regex.search(file_record)
if language_match:
language = BiblesResourcesDB.get_language(
language_match.group(1))
if language:
language_id = language['id']
self.save_meta('language_id', language_id)
continue
match = self.verse_regex.search(file_record)
if match:
# Set meta language_id if not detected till now
if not language_id:
language_id = self.get_language(bible_name)
if not language_id:
log.exception('Importing books from "%s" failed' % self.filename)
return False
match_count += 1
book = str(match.group(1))
chapter = int(match.group(2))
verse = int(match.group(3))
verse_text = match.group(4)
book_ref_id = self.get_book_ref_id_by_name(book, book_count, language_id)
if not book_ref_id:
log.exception('Importing books from "%s" failed' % self.filename)
return False
book_details = BiblesResourcesDB.get_book_by_id(book_ref_id)
if not db_book or db_book.name != book_details['name']:
log.debug('New book: "%s"' % book_details['name'])
db_book = self.create_book(
book_details['name'],
book_ref_id,
book_details['testament_id'])
if last_chapter == 0:
self.wizard.progress_bar.setMaximum(chapter_count)
if last_chapter != chapter:
if last_chapter != 0:
self.session.commit()
self.wizard.increment_progress_bar(translate('BiblesPlugin.OsisImport', 'Importing %s %s...',
'Importing <book name> <chapter>...') % (book_details['name'], chapter))
last_chapter = chapter
                    # All of this rigmarole below is because the mod2osis tool from the Sword library embeds XML in
                    # the OSIS but neglects to enclose the verse text (with XML) in <![CDATA[ ]]> tags.
verse_text = self.note_regex.sub('', verse_text)
verse_text = self.title_regex.sub('', verse_text)
verse_text = self.milestone_regex.sub('', verse_text)
verse_text = self.fi_regex.sub('', verse_text)
verse_text = self.rf_regex.sub('', verse_text)
verse_text = self.lb_regex.sub(' ', verse_text)
verse_text = self.lg_regex.sub('', verse_text)
verse_text = self.l_regex.sub(' ', verse_text)
verse_text = self.w_regex.sub('', verse_text)
verse_text = self.q1_regex.sub('"', verse_text)
verse_text = self.q2_regex.sub('\'', verse_text)
verse_text = self.q_regex.sub('', verse_text)
verse_text = self.divine_name_regex.sub(repl, verse_text)
verse_text = self.trans_regex.sub('', verse_text)
verse_text = verse_text.replace('</lb>', '') \
.replace('</l>', '').replace('<lg>', '') \
.replace('</lg>', '').replace('</q>', '') \
.replace('</div>', '').replace('</w>', '')
verse_text = self.spaces_regex.sub(' ', verse_text)
self.create_verse(db_book.id, chapter, verse, verse_text)
self.application.process_events()
self.session.commit()
if match_count == 0:
success = False
except (ValueError, IOError):
log.exception('Loading bible from OSIS file failed')
success = False
finally:
if osis:
osis.close()
if self.stop_import_flag:
return False
else:
return success
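# Illustrative note (hypothetical OSIS fragment): given a record such as
#   <verse osisID="Gen.1.1">In the beginning ...</verse>
# self.verse_regex captures ('Gen', '1', '1', 'In the beginning ...'), i.e.
# book, chapter, verse number and verse text, before the clean-up
# substitutions above are applied to the text.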
|
marmyshev/item_title
|
openlp/plugins/bibles/lib/osis.py
|
Python
|
gpl-2.0
| 9,953
|
[
"Brian"
] |
d261c774fb222a560690d8a1f5eaec214183d8ed4536c7bb4b4132c1428f9998
|
from pymicro.view.vtk_utils import *
from pymicro.view.vtk_anim import *
from pymicro.view.scene3d import Scene3D
from pymicro.crystal.microstructure import Orientation
from pymicro.crystal.lattice import Lattice
s3d = Scene3D(display=True, ren_size=(600, 600))
euler_angles = np.array([142.8, 32.0, 214.4])
(phi1, Phi, phi2) = euler_angles
orientation = Orientation.from_euler(euler_angles)
scene = vtkAnimationScene(s3d.get_renderer(), s3d.renWin.GetSize())
scene.save_image = True
scene.timer_incr = 1
scene.timer_end = 179
scene.verbose = False
scene.prefix = 'euler_angles_anim'
lab_frame = axes_actor(1, fontSize=50)
lab_frame.SetCylinderRadius(0.02)
s3d.add(lab_frame)
crystal_frame = axes_actor(0.6, fontSize=50, axisLabels=None)
crystal_frame.SetCylinderRadius(0.04)
collection = vtk.vtkPropCollection()
crystal_frame.GetActors(collection)
for i in range(collection.GetNumberOfItems()):
collection.GetItemAsObject(i).GetProperty().SetColor(0.0, 0.0, 0.0)
crystal_frame.SetVisibility(0)
s3d.add(crystal_frame)
a = 0.4045 # nm, value for Al
l = Lattice.cubic(a)
cubic_lattice = lattice_3d(l, crystal_orientation=orientation, tubeRadius=0.1 * a, sphereRadius=0.2 * a)
s3d.add(cubic_lattice)
# display the crystal frame progressively
crystal_frame_visibility = vtkSetVisibility(5, crystal_frame, gradually=True)
crystal_frame_visibility.time_anim_ends = 20
scene.add_animation(crystal_frame_visibility)
# apply Euler angles one by one with the Bunge convention (ZXZ)
crystal_frame_rotate_phi1 = vtkRotateActorAroundAxis(30, duration=40, axis=[0., 0., 1.], angle=phi1)
crystal_frame_rotate_phi1.set_actor(crystal_frame)
scene.add_animation(crystal_frame_rotate_phi1)
o_phi1 = Orientation.from_euler((phi1, 0., 0.))
x_prime = np.dot(o_phi1.orientation_matrix().T, [1., 0., 0.])
print('after phi1, X axis is {0}'.format(x_prime))
crystal_frame_rotate_Phi = vtkRotateActorAroundAxis(80, duration=40, axis=[1., 0., 0.], angle=Phi)
crystal_frame_rotate_Phi.set_actor(crystal_frame)
# fix the reference to the user_transform_matrix after phi1
m = vtk.vtkMatrix4x4() # row major order, 16 elements matrix
m.DeepCopy(crystal_frame.GetUserTransform().GetMatrix())
for j in range(3):
for i in range(3):
m.SetElement(j, i, o_phi1.orientation_matrix()[i, j])
crystal_frame_rotate_Phi.user_transform_matrix = m
scene.add_animation(crystal_frame_rotate_Phi)
o_phi1_Phi = Orientation.from_euler((phi1, Phi, 0.))
z_prime = np.dot(o_phi1_Phi.orientation_matrix().T, [0., 0., 1.])
print('after phi1 and Phi, Z axis is {0}'.format(z_prime))
crystal_frame_rotate_phi2 = vtkRotateActorAroundAxis(130, duration=40, axis=[0., 0., 1.], angle=phi2)
crystal_frame_rotate_phi2.set_actor(crystal_frame)
# fix the reference to the user_transform_matrix after phi1 and Phi
m2 = vtk.vtkMatrix4x4() # row major order, 16 elements matrix
m2.DeepCopy(crystal_frame.GetUserTransform().GetMatrix())
for j in range(3):
for i in range(3):
m2.SetElement(j, i, o_phi1_Phi.orientation_matrix()[i, j])
crystal_frame_rotate_phi2.user_transform_matrix = m2
scene.add_animation(crystal_frame_rotate_phi2)
# add some text actors
euler_str = 'The orientation matrix brings the lab frame\n' \
'into coincidence with the crystal frame.\n' \
'Crystal Euler angles = (%.1f, %.1f, %.1f)' % (phi1, Phi, phi2)
euler_text = text(euler_str, coords=(0.5, 0.05))
s3d.get_renderer().AddActor2D(euler_text)
rotation_text = text('', coords=(0.5, 0.95))
s3d.get_renderer().AddActor2D(rotation_text)
def update_rotation_text():
if scene.timer_count < crystal_frame_rotate_phi1.time_anim_starts:
return ''
elif crystal_frame_rotate_phi1.time_anim_starts < scene.timer_count <= crystal_frame_rotate_phi1.time_anim_ends:
the_phi1 = (scene.timer_count - crystal_frame_rotate_phi1.time_anim_starts) / \
float(crystal_frame_rotate_phi1.time_anim_ends - crystal_frame_rotate_phi1.time_anim_starts) * phi1
print('t=%d, computed the_phi1 = %.1f' % (scene.timer_count, the_phi1))
return 'Applying first rotation: phi1 = %.1f degrees' % the_phi1
elif crystal_frame_rotate_Phi.time_anim_starts < scene.timer_count <= crystal_frame_rotate_Phi.time_anim_ends:
the_Phi = (scene.timer_count - crystal_frame_rotate_Phi.time_anim_starts) / \
float(crystal_frame_rotate_Phi.time_anim_ends - crystal_frame_rotate_Phi.time_anim_starts) * Phi
return 'Applying second rotation: Phi = %.1f degrees' % the_Phi
elif crystal_frame_rotate_phi2.time_anim_starts < scene.timer_count <= crystal_frame_rotate_phi2.time_anim_ends:
the_phi2 = (scene.timer_count - crystal_frame_rotate_phi2.time_anim_starts) / \
float(crystal_frame_rotate_phi2.time_anim_ends - crystal_frame_rotate_phi2.time_anim_starts) * phi2
return 'Applying third rotation: phi2 = %.1f degrees' % the_phi2
return None
update_text = vtkUpdateText(rotation_text, update_rotation_text, t=0, duration=scene.timer_end)
scene.add_animation(update_text)
# camera settings
cam = setup_camera(size=(1., 1., 1.))
cam.SetFocalPoint(a / 2, a / 2, a / 2)
s3d.set_camera(cam)
# crystal_frame.SetVisibility(1)
# scene.render_at(100)
scene.render()
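# For reference, the Bunge (ZXZ) convention applied above composes the three
# rotations as g = Rz(phi2) . Rx(Phi) . Rz(phi1). A minimal numpy cross-check
# (a sketch, independent of the VTK animation; passive rotation matrices are
# assumed, matching pymicro's convention):
#
#   c, s = np.cos, np.sin
#   t1, t, t2 = np.radians([phi1, Phi, phi2])
#   Rz1 = np.array([[c(t1), s(t1), 0], [-s(t1), c(t1), 0], [0, 0, 1]])
#   Rx = np.array([[1, 0, 0], [0, c(t), s(t)], [0, -s(t), c(t)]])
#   Rz2 = np.array([[c(t2), s(t2), 0], [-s(t2), c(t2), 0], [0, 0, 1]])
#   g = Rz2 @ Rx @ Rz1  # expected to match orientation.orientation_matrix()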
|
heprom/pymicro
|
examples/animation/euler_angles_anim.py
|
Python
|
mit
| 5,222
|
[
"CRYSTAL",
"VTK"
] |
63fb549134292abff85aaa1a9a86dbe79550cebbb65dfec9d83e09757b209103
|
"""rbf - Radial basis functions for interpolation/smoothing scattered Nd data.
Written by John Travers <jtravs@gmail.com>, February 2007
Based closely on Matlab code by Alex Chirokov
Additional, large, improvements by Robert Hetland
Some additional alterations by Travis Oliphant
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Copyright (c) 2006-2007, Robert Hetland <hetland@tamu.edu>
Copyright (c) 2007, John Travers <jtravs@gmail.com>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Robert Hetland nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy import linalg
from scipy._lib.six import callable, get_method_function, get_function_code
from scipy.special import xlogy
from scipy.spatial.distance import cdist, pdist, squareform
__all__ = ['Rbf']
class Rbf(object):
"""
Rbf(*args)
A class for radial basis function approximation/interpolation of
n-dimensional scattered data.
Parameters
----------
*args : arrays
x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
and d is the array of values at the nodes
function : str or callable, optional
The radial basis function, based on the radius, r, given by the norm
(default is Euclidean distance); the default is 'multiquadric'::
'multiquadric': sqrt((r/self.epsilon)**2 + 1)
'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
'gaussian': exp(-(r/self.epsilon)**2)
'linear': r
'cubic': r**3
'quintic': r**5
'thin_plate': r**2 * log(r)
If callable, then it must take 2 arguments (self, r). The epsilon
parameter will be available as self.epsilon. Other keyword
arguments passed in will be available as well.
epsilon : float, optional
Adjustable constant for gaussian or multiquadrics functions
- defaults to approximate average distance between nodes (which is
a good start).
smooth : float, optional
Values greater than zero increase the smoothness of the
approximation. 0 is for interpolation (default), the function will
always go through the nodal points in this case.
norm : str, callable, optional
A function that returns the 'distance' between two points, with
inputs as arrays of positions (x, y, z, ...), and an output as an
array of distance. E.g., the default: 'euclidean', such that the result
is a matrix of the distances from each point in ``x1`` to each point in
``x2``. For more options, see documentation of
        `scipy.spatial.distance.cdist`.
Attributes
----------
N : int
The number of data points (as determined by the input arrays).
di : ndarray
The 1-D array of data values at each of the data coordinates `xi`.
xi : ndarray
The 2-D array of data coordinates.
function : str or callable
The radial basis function. See description under Parameters.
epsilon : float
Parameter used by gaussian or multiquadrics functions. See Parameters.
smooth : float
Smoothing parameter. See description under Parameters.
norm : str or callable
The distance function. See description under Parameters.
nodes : ndarray
A 1-D array of node values for the interpolation.
A : internal property, do not use
Examples
--------
>>> from scipy.interpolate import Rbf
>>> x, y, z, d = np.random.rand(4, 50)
>>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance
>>> xi = yi = zi = np.linspace(0, 1, 20)
>>> di = rbfi(xi, yi, zi) # interpolated values
>>> di.shape
(20,)
"""
# Available radial basis functions that can be selected as strings;
# they all start with _h_ (self._init_function relies on that)
def _h_multiquadric(self, r):
return np.sqrt((1.0/self.epsilon*r)**2 + 1)
def _h_inverse_multiquadric(self, r):
return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1)
def _h_gaussian(self, r):
return np.exp(-(1.0/self.epsilon*r)**2)
def _h_linear(self, r):
return r
def _h_cubic(self, r):
return r**3
def _h_quintic(self, r):
return r**5
def _h_thin_plate(self, r):
return xlogy(r**2, r)
# Setup self._function and do smoke test on initial r
def _init_function(self, r):
if isinstance(self.function, str):
self.function = self.function.lower()
_mapped = {'inverse': 'inverse_multiquadric',
'inverse multiquadric': 'inverse_multiquadric',
'thin-plate': 'thin_plate'}
if self.function in _mapped:
self.function = _mapped[self.function]
func_name = "_h_" + self.function
if hasattr(self, func_name):
self._function = getattr(self, func_name)
else:
functionlist = [x[3:] for x in dir(self)
if x.startswith('_h_')]
raise ValueError("function must be a callable or one of " +
", ".join(functionlist))
self._function = getattr(self, "_h_"+self.function)
elif callable(self.function):
allow_one = False
if hasattr(self.function, 'func_code') or \
hasattr(self.function, '__code__'):
val = self.function
allow_one = True
elif hasattr(self.function, "im_func"):
val = get_method_function(self.function)
elif hasattr(self.function, "__call__"):
val = get_method_function(self.function.__call__)
else:
raise ValueError("Cannot determine number of arguments to "
"function")
argcount = get_function_code(val).co_argcount
if allow_one and argcount == 1:
self._function = self.function
elif argcount == 2:
if sys.version_info[0] >= 3:
self._function = self.function.__get__(self, Rbf)
else:
import new
self._function = new.instancemethod(self.function, self,
Rbf)
else:
raise ValueError("Function argument must take 1 or 2 "
"arguments.")
a0 = self._function(r)
if a0.shape != r.shape:
raise ValueError("Callable must take array and return array of "
"the same shape")
return a0
def __init__(self, *args, **kwargs):
# `args` can be a variable number of arrays; we flatten them and store
# them as a single 2-D array `xi` of shape (n_args-1, array_size),
# plus a 1-D array `di` for the values.
# All arrays must have the same number of elements
self.xi = np.asarray([np.asarray(a, dtype=np.float_).flatten()
for a in args[:-1]])
self.N = self.xi.shape[-1]
self.di = np.asarray(args[-1]).flatten()
if not all([x.size == self.di.size for x in self.xi]):
raise ValueError("All arrays must be equal length.")
self.norm = kwargs.pop('norm', 'euclidean')
self.epsilon = kwargs.pop('epsilon', None)
if self.epsilon is None:
# default epsilon is the "the average distance between nodes" based
# on a bounding hypercube
ximax = np.amax(self.xi, axis=1)
ximin = np.amin(self.xi, axis=1)
edges = ximax - ximin
edges = edges[np.nonzero(edges)]
self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size)
self.smooth = kwargs.pop('smooth', 0.0)
self.function = kwargs.pop('function', 'multiquadric')
# attach anything left in kwargs to self for use by any user-callable
# function or to save on the object returned.
for item, value in kwargs.items():
setattr(self, item, value)
self.nodes = linalg.solve(self.A, self.di)
@property
def A(self):
# this only exists for backwards compatibility: self.A was available
# and, at least technically, public.
r = squareform(pdist(self.xi.T, self.norm)) # Pairwise norm
return self._init_function(r) - np.eye(self.N)*self.smooth
def _call_norm(self, x1, x2):
return cdist(x1.T, x2.T, self.norm)
def __call__(self, *args):
args = [np.asarray(x) for x in args]
if not all([x.shape == y.shape for x in args for y in args]):
raise ValueError("Array lengths must be equal")
shp = args[0].shape
xa = np.asarray([a.flatten() for a in args], dtype=np.float_)
r = self._call_norm(xa, self.xi)
return np.dot(self._function(r), self.nodes).reshape(shp)
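# Illustrative sketch of the callable ``function`` option described in the
# class docstring (made-up data):
#
#   >>> def gauss(self, r):
#   ...     return np.exp(-(r/self.epsilon)**2)
#   >>> x = np.linspace(0, 10, 9)
#   >>> rbfi = Rbf(x, np.sin(x), function=gauss)  # 2-arg callable: (self, r)
#   >>> rbfi(np.linspace(0, 10, 101)).shape
#   (101,)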
|
Eric89GXL/scipy
|
scipy/interpolate/rbf.py
|
Python
|
bsd-3-clause
| 10,649
|
[
"Gaussian"
] |
830aed6d9b8c1867545d70d551098636685153175995efb09cdf87ad866630a1
|
#
# QAPI types generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_fwd_struct(name, members, builtin_type=False):
if builtin_type:
return mcgen('''
typedef struct %(name)sList
{
union {
%(type)s value;
uint64_t padding;
};
struct %(name)sList *next;
} %(name)sList;
''',
type=c_type(name),
name=name)
return mcgen('''
typedef struct %(name)s %(name)s;
typedef struct %(name)sList
{
union {
%(name)s *value;
uint64_t padding;
};
struct %(name)sList *next;
} %(name)sList;
''',
name=name)
def generate_fwd_enum_struct(name, members):
return mcgen('''
typedef struct %(name)sList
{
%(name)s value;
struct %(name)sList *next;
} %(name)sList;
''',
name=name)
def generate_struct(structname, fieldname, members):
ret = mcgen('''
struct %(name)s
{
''',
name=structname)
for argname, argentry, optional, structured in parse_args(members):
if optional:
ret += mcgen('''
bool has_%(c_name)s;
''',
c_name=c_var(argname))
if structured:
push_indent()
ret += generate_struct("", argname, argentry)
pop_indent()
else:
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=c_type(argentry), c_name=c_var(argname))
if len(fieldname):
fieldname = " " + fieldname
ret += mcgen('''
}%(field)s;
''',
field=fieldname)
return ret
def generate_enum_lookup(name, values):
ret = mcgen('''
const char *%(name)s_lookup[] = {
''',
name=name)
for value in values:
ret += mcgen('''
"%(value)s",
''',
value=value)
ret += mcgen('''
NULL,
};
''')
return ret
def generate_enum_name(name):
if name.isupper():
return c_fun(name, False)
new_name = ''
for c in c_fun(name, False):
if c.isupper():
new_name += '_'
new_name += c
return new_name.lstrip('_').upper()
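# Hedged example of the transformation above: generate_enum_name('CamelCase')
# prefixes each uppercase letter with '_' and upper-cases the result, giving
# 'CAMEL_CASE'; an already-uppercase name is passed through c_fun() unchanged.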
def generate_enum(name, values):
lookup_decl = mcgen('''
extern const char *%(name)s_lookup[];
''',
name=name)
enum_decl = mcgen('''
typedef enum %(name)s
{
''',
name=name)
# append automatically generated _MAX value
enum_values = values + [ 'MAX' ]
i = 0
for value in enum_values:
enum_decl += mcgen('''
%(abbrev)s_%(value)s = %(i)d,
''',
abbrev=de_camel_case(name).upper(),
value=generate_enum_name(value),
i=i)
i += 1
enum_decl += mcgen('''
} %(name)s;
''',
name=name)
return lookup_decl + enum_decl
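# Hedged illustration of the generated C (assuming qapi.de_camel_case('MyEnum')
# yields 'my_enum'): generate_enum('MyEnum', ['value1']) emits roughly
#
#     extern const char *MyEnum_lookup[];
#     typedef enum MyEnum
#     {
#         MY_ENUM_VALUE1 = 0,
#         MY_ENUM_MAX = 1,
#     } MyEnum;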
def generate_union(name, typeinfo):
ret = mcgen('''
struct %(name)s
{
%(name)sKind kind;
union {
void *data;
''',
name=name)
for key in typeinfo:
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=c_type(typeinfo[key]),
c_name=c_fun(key))
ret += mcgen('''
};
};
''')
return ret
def generate_type_cleanup_decl(name):
ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj);
''',
c_type=c_type(name),type=name)
return ret
def generate_type_cleanup(name):
ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj)
{
QapiDeallocVisitor *md;
Visitor *v;
if (!obj) {
return;
}
md = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(md);
visit_type_%(type)s(v, &obj, NULL, NULL);
qapi_dealloc_visitor_cleanup(md);
}
''',
c_type=c_type(name),type=name)
return ret
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "chbp:o:",
["source", "header", "builtins",
"prefix=", "output-dir="])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
prefix = ""
c_file = 'qapi-types.c'
h_file = 'qapi-types.h'
do_c = False
do_h = False
do_builtins = False
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
elif o in ("-c", "--source"):
do_c = True
elif o in ("-h", "--header"):
do_h = True
elif o in ("-b", "--builtins"):
do_builtins = True
if not do_c and not do_h:
do_c = True
do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
def maybe_open(really, name, opt):
if really:
return open(name, opt)
else:
import StringIO
return StringIO.StringIO()
fdef = maybe_open(do_c, c_file, 'w')
fdecl = maybe_open(do_h, h_file, 'w')
fdef.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* deallocation functions for schema-defined QAPI types
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
* Michael Roth <mdroth@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "qapi/dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
''', prefix=prefix))
fdecl.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI types
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include <stdbool.h>
#include <stdint.h>
''',
guard=guardname(h_file)))
exprs = parse_schema(sys.stdin)
exprs = filter(lambda expr: not expr.has_key('gen'), exprs)
fdecl.write(guardstart("QAPI_TYPES_BUILTIN_STRUCT_DECL"))
for typename in builtin_types:
fdecl.write(generate_fwd_struct(typename, None, builtin_type=True))
fdecl.write(guardend("QAPI_TYPES_BUILTIN_STRUCT_DECL"))
for expr in exprs:
ret = "\n"
if expr.has_key('type'):
ret += generate_fwd_struct(expr['type'], expr['data'])
elif expr.has_key('enum'):
ret += generate_enum(expr['enum'], expr['data']) + "\n"
ret += generate_fwd_enum_struct(expr['enum'], expr['data'])
fdef.write(generate_enum_lookup(expr['enum'], expr['data']))
elif expr.has_key('union'):
ret += generate_fwd_struct(expr['union'], expr['data']) + "\n"
ret += generate_enum('%sKind' % expr['union'], expr['data'].keys())
fdef.write(generate_enum_lookup('%sKind' % expr['union'], expr['data'].keys()))
else:
continue
fdecl.write(ret)
# to avoid header dependency hell, we always generate declarations
# for built-in types in our header files and simply guard them
fdecl.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DECL"))
for typename in builtin_types:
fdecl.write(generate_type_cleanup_decl(typename + "List"))
fdecl.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DECL"))
# ...this doesn't work for cases where we link in multiple objects that
# have the functions defined, so we use -b option to provide control
# over these cases
if do_builtins:
fdef.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DEF"))
for typename in builtin_types:
fdef.write(generate_type_cleanup(typename + "List"))
fdef.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DEF"))
for expr in exprs:
ret = "\n"
if expr.has_key('type'):
ret += generate_struct(expr['type'], "", expr['data']) + "\n"
ret += generate_type_cleanup_decl(expr['type'] + "List")
fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n")
ret += generate_type_cleanup_decl(expr['type'])
fdef.write(generate_type_cleanup(expr['type']) + "\n")
elif expr.has_key('union'):
ret += generate_union(expr['union'], expr['data'])
ret += generate_type_cleanup_decl(expr['union'] + "List")
fdef.write(generate_type_cleanup(expr['union'] + "List") + "\n")
ret += generate_type_cleanup_decl(expr['union'])
fdef.write(generate_type_cleanup(expr['union']) + "\n")
elif expr.has_key('enum'):
ret += generate_type_cleanup_decl(expr['enum'] + "List")
fdef.write(generate_type_cleanup(expr['enum'] + "List") + "\n")
else:
continue
fdecl.write(ret)
fdecl.write('''
#endif
''')
fdecl.flush()
fdecl.close()
fdef.flush()
fdef.close()
|
duythanhphan/qemu
|
scripts/qapi-types.py
|
Python
|
gpl-2.0
| 8,980
|
[
"VisIt"
] |
3b9b3b32f10c849cbc58543391436b77440a1c73913193384dc6880838f13bdb
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""\
=========================================================================
Reading trajectories from memory --- :mod:`MDAnalysis.coordinates.memory`
=========================================================================
:Author: Wouter Boomsma
:Year: 2016
:Copyright: GNU Public License v2
:Maintainer: Wouter Boomsma <wb@di.ku.dk>, wouterboomsma on github
.. versionadded:: 0.16.0
The module contains a trajectory reader that operates on an array in
memory, rather than reading from a file. This makes it possible to
operate on raw coordinates using existing MDAnalysis tools. In
addition, it allows the user to make changes to the coordinates in a
trajectory (e.g. through
:attr:`MDAnalysis.core.groups.AtomGroup.positions`) without having
to write the entire state to file.
How to use the :class:`MemoryReader`
====================================
The :class:`MemoryReader` can be used to either directly generate a
trajectory as a numpy array or by transferring an existing trajectory
to memory.
In-memory representation of arbitrary trajectories
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If sufficient memory is available to hold a whole trajectory in memory
then analysis can be sped up substantially by transferring the
trajectory to memory.
The most straightforward use of the :class:`MemoryReader` is to simply
use the ``in_memory=True`` flag for the
:class:`~MDAnalysis.core.universe.Universe` class, which
automatically transfers a trajectory to memory::
import MDAnalysis as mda
from MDAnalysisTests.datafiles import TPR, XTC
universe = mda.Universe(TPR, XTC, in_memory=True)
Of course, sufficient memory has to be available to hold the whole
trajectory.
Switching a trajectory to an in-memory representation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The decision to transfer the trajectory to memory can be made at any
time with the
:meth:`~MDAnalysis.core.universe.Universe.transfer_to_memory` method
of a :class:`~MDAnalysis.core.universe.Universe`::
universe = mda.Universe(TPR, XTC)
universe.transfer_to_memory()
This operation may take a while (with `verbose=True` a progress bar is
displayed) but then subsequent operations on the trajectory directly
operate on the in-memory array and will be very fast.
Constructing a Reader from a numpy array
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The :class:`MemoryReader` provides great flexibility because it
becomes possible to create a :class:`~MDAnalysis.core.universe.Universe` directly
from a numpy array.
A simple example consists of a new universe created from the array
extracted from a DCD
:meth:`~MDAnalysis.coordinates.DCD.DCDReader.timeseries`::
import MDAnalysis as mda
from MDAnalysisTests.datafiles import DCD, PSF
from MDAnalysis.coordinates.memory import MemoryReader
universe = mda.Universe(PSF, DCD)
coordinates = universe.trajectory.timeseries(universe.atoms)
universe2 = mda.Universe(PSF, coordinates, format=MemoryReader, order='afc')
.. rubric:: Creating an in-memory trajectory with
:func:`~MDAnalysis.analysis.base.AnalysisFromFunction`
The :meth:`~MDAnalysis.coordinates.DCD.DCDReader.timeseries` is
currently only implemented for the
:class:`~MDAnalysis.coordinates.DCD.DCDReader`. However, the
:func:`MDAnalysis.analysis.base.AnalysisFromFunction` can provide the
same functionality for any supported trajectory format::
import MDAnalysis as mda
from MDAnalysis.tests.datafiles import PDB, XTC
from MDAnalysis.coordinates.memory import MemoryReader
from MDAnalysis.analysis.base import AnalysisFromFunction
u = mda.Universe(PDB, XTC)
coordinates = AnalysisFromFunction(lambda ag: ag.positions.copy(),
u.atoms).run().results
u2 = mda.Universe(PDB, coordinates, format=MemoryReader)
.. _creating-in-memory-trajectory-label:
Creating an in-memory trajectory of a sub-system
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Creating a trajectory for just a selection of an existing trajectory
requires the transfer of the appropriate coordinates as well as
creation of a topology of the sub-system. For the latter one can use
the :func:`~MDAnalysis.core.universe.Merge` function, for the former
the :meth:`~MDAnalysis.core.universe.Universe.load_new` method of a
:class:`~MDAnalysis.core.universe.Universe` together with the
:class:`MemoryReader`. In the following, an in-memory trajectory of
only the protein is created::
import MDAnalysis as mda
from MDAnalysis.tests.datafiles import PDB, XTC
from MDAnalysis.coordinates.memory import MemoryReader
from MDAnalysis.analysis.base import AnalysisFromFunction
u = mda.Universe(PDB, XTC)
protein = u.select_atoms("protein")
coordinates = AnalysisFromFunction(lambda ag: ag.positions.copy(),
protein).run().results
u2 = mda.Merge(protein) # create the protein-only Universe
u2.load_new(coordinates, format=MemoryReader)
The protein coordinates are extracted into ``coordinates`` and then
the in-memory trajectory is loaded from these coordinates. In
principle, this could have all be done in one line::
u2 = mda.Merge(protein).load_new(
AnalysisFromFunction(lambda ag: ag.positions.copy(),
protein).run().results,
format=MemoryReader)
The new :class:`~MDAnalysis.core.universe.Universe` ``u2`` can be used
to, for instance, write out a new trajectory or perform fast analysis
on the sub-system.
Classes
=======
.. autoclass:: Timestep
:members:
:inherited-members:
.. autoclass:: MemoryReader
:members:
:inherited-members:
"""
from __future__ import absolute_import
import logging
import errno
import numpy as np
from . import base
class Timestep(base.Timestep):
"""
Timestep for the :class:`MemoryReader`
Overrides the positions property in
    :class:`MDAnalysis.coordinates.base.Timestep` to avoid
duplication of the array.
"""
@property
def positions(self):
return base.Timestep.positions.fget(self)
@positions.setter
def positions(self, new):
self.has_positions = True
# Use reference to original rather than a copy
self._pos = new
class MemoryReader(base.ProtoReader):
"""
MemoryReader works with trajectories represented as numpy arrays.
A trajectory reader interface to a numpy array of the coordinates.
For compatibility with the timeseries interface, support is provided for
specifying the order of columns through the format option.
"""
format = 'MEMORY'
_Timestep = Timestep
def __init__(self, coordinate_array, order='fac',
dimensions=None, dt=1, filename=None, **kwargs):
"""
Parameters
----------
coordinate_array : :class:`~numpy.ndarray` object
The underlying array of coordinates
order : str, optional
            the order/shape of the return data array, corresponding
            to (a)tom, (f)rame, (c)oordinates. All six permutations
            of 'a', 'f', 'c' are allowed, i.e. "fac" returns an array
            whose shape is (frame, number of atoms, coordinates).
dimensions: (*A*, *B*, *C*, *alpha*, *beta*, *gamma*), optional
unitcell dimensions (*A*, *B*, *C*, *alpha*, *beta*, *gamma*)
lengths *A*, *B*, *C* are in the MDAnalysis length unit (Å), and
angles are in degrees.
dt: float, optional
The time difference between frames (ps). If :attr:`time`
is set, then `dt` will be ignored.
filename: string, optional
The name of the file from which this instance is created. Set to None
when created from an array
"""
super(MemoryReader, self).__init__()
self.filename = filename
self.stored_order = order
self.set_array(np.asarray(coordinate_array), order)
self.n_frames = \
self.coordinate_array.shape[self.stored_order.find('f')]
self.n_atoms = \
self.coordinate_array.shape[self.stored_order.find('a')]
provided_n_atoms = kwargs.pop("n_atoms", None)
if (provided_n_atoms is not None and
provided_n_atoms != self.n_atoms):
raise ValueError("The provided value for n_atoms ({}) "
"does not match the shape of the coordinate "
"array ({})"
.format(provided_n_atoms, self.n_atoms))
self.ts = self._Timestep(self.n_atoms, **kwargs)
self.ts.dt = dt
if dimensions is not None:
self.ts.dimensions = dimensions
self.ts.frame = -1
self.ts.time = -1
self._read_next_timestep()
def set_array(self, coordinate_array, order='fac'):
"""
Set underlying array in desired column order.
Parameters
----------
coordinate_array : :class:`~numpy.ndarray` object
The underlying array of coordinates
order
            The order/shape of the stored coordinate array, corresponding
            to (a)tom, (f)rame, (c)oordinates. All six permutations
            of 'a', 'f', 'c' are allowed, i.e. "fac" means the array
            shape is (frame, number of atoms, coordinates).
"""
# Only make copy if not already in float32 format
self.coordinate_array = coordinate_array.astype('float32', copy=False)
        self.stored_order = order  # timeseries() reads stored_order, not stored_format
def get_array(self):
"""
Return underlying array.
"""
return self.coordinate_array
def _reopen(self):
"""Reset iteration to first frame"""
self.ts.frame = -1
self.ts.time = -1
def timeseries(self, asel=None, start=0, stop=-1, step=1, format='afc'):
"""Return a subset of coordinate data for an AtomGroup in desired
column order/format. If no selection is given, it will return a view of
the underlying array, while a copy is returned otherwise.
Parameters
        ----------
asel : :class:`~MDAnalysis.core.groups.AtomGroup` object
Atom selection. Defaults to None, in which case the full set of
coordinate data is returned. Note that in this case, a view
of the underlying numpy array is returned, while a copy of the
data is returned whenever asel is different from None.
        start, stop, step : int
            range of trajectory to access; start and stop are inclusive
format : str
            the order/shape of the return data array, corresponding
            to (a)tom, (f)rame, (c)oordinates. All six permutations
            of 'a', 'f', 'c' are allowed, i.e. "fac" returns an array
            whose shape is (frame, number of atoms, coordinates).
"""
# The "format" name is used for compliance with DCD.timeseries
# Renaming it to order here for internal consistency in this class
order = format
array = self.get_array()
if order == self.stored_order:
pass
elif order[0] == self.stored_order[0]:
array = np.swapaxes(array, 1, 2)
elif order[1] == self.stored_order[1]:
array = np.swapaxes(array, 0, 2)
elif order[2] == self.stored_order[2]:
array = np.swapaxes(array, 0, 1)
elif order[0] == self.stored_order[1]:
array = np.swapaxes(array, 1, 0)
array = np.swapaxes(array, 1, 2)
elif order[0] == self.stored_order[2]:
array = np.swapaxes(array, 2, 0)
array = np.swapaxes(array, 1, 2)
a_index = order.find('a')
f_index = order.find('f')
stop_index = stop+1
if stop_index == 0:
stop_index = None
basic_slice = ([slice(None)] * f_index +
[slice(start, stop_index, step)] +
[slice(None)] * (2-f_index))
# Return a view if either:
# 1) asel is None
# 2) asel corresponds to the selection of all atoms.
array = array[basic_slice]
if (asel is None or asel is asel.universe.atoms):
return array
else:
# If selection is specified, return a copy
return array.take(asel.indices, a_index)
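    # Hedged example: with the default stored_order 'fac', timeseries(format='fac')
    # returns a view of the (frame-sliced) array, while format='afc' first swaps
    # the frame and atom axes via the order[2] == self.stored_order[2] branch above.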
def _read_next_timestep(self, ts=None):
"""copy next frame into timestep"""
if self.ts.frame >= self.n_frames-1:
raise IOError(errno.EIO, 'trying to go over trajectory limit')
if ts is None:
ts = self.ts
ts.frame += 1
f_index = self.stored_order.find('f')
basic_slice = ([slice(None)]*(f_index) +
[self.ts.frame] +
[slice(None)]*(2-f_index))
ts.positions = self.coordinate_array[basic_slice]
ts.time = self.ts.frame*self.dt
return ts
def _read_frame(self, i):
"""read frame i"""
# Frame number is incremented to zero by _read_next_timestep()
self.ts.frame = i - 1
return self._read_next_timestep()
def __repr__(self):
"""String representation"""
return ("<{cls} with {nframes} frames of {natoms} atoms>"
"".format(
cls=self.__class__.__name__,
nframes=self.n_frames,
natoms=self.n_atoms
))
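# --- Hedged usage sketch (not part of the original module) -------------------
# Driving the MemoryReader directly from a plain numpy array; order='fac'
# means the array is indexed as (frame, atom, coordinate).
if __name__ == '__main__':
    _coords = np.random.rand(10, 5, 3).astype(np.float32)  # 10 frames, 5 atoms
    _reader = MemoryReader(_coords, order='fac', dt=2.0)
    print(_reader)                     # <MemoryReader with 10 frames of 5 atoms>
    print(_reader.ts.positions.shape)  # (5, 3): positions of the current frame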
|
kain88-de/mdanalysis
|
package/MDAnalysis/coordinates/memory.py
|
Python
|
gpl-2.0
| 14,541
|
[
"MDAnalysis"
] |
34d5d6174ea19de86a1de2d1b4fb74e94a625050ab0eb49b2d8b5bd2a6f4363b
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from agui.aextras import APopup
from agui.backends.gtk.imports import *
class Popup(APopup):
def popup(self, app, title, message, icon):
Notify.init(app)
n = Notify.Notification.new(title, message, icon.name())
n.show()
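# Hedged usage sketch (icon is assumed to be an agui icon object exposing
# .name(), as required by popup() above):
#   Popup().popup('my-app', 'Build finished', 'All tests passed', icon)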
|
bhdouglass/agui
|
agui/backends/gtk/extras/popup.py
|
Python
|
gpl-3.0
| 1,012
|
[
"Brian"
] |
7df8e187af9e0c7b9781fd0a01eaf33677b775081ad4b56feb63ab1f22f7bc62
|
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches
import numpy as np
import Nsound as ns
def main():
matplotlib.rc('font', size = 24)
matplotlib.rc('figure', figsize = [16, 6])
matplotlib.rcParams.update({'figure.subplot.left' : 0.09 })
matplotlib.rcParams.update({'figure.subplot.bottom': 0.15 })
matplotlib.rcParams.update({'figure.subplot.right' : 0.97 })
matplotlib.rcParams.update({'figure.subplot.top' : 0.88 })
sr = 1000
#--------------------------------------------------------------------------
# figure 1
gen = ns.Sine(sr)
signal = ns.AudioStream(sr, 1)
signal << gen.generate(1.0, 3)
signal.plot('3 Hz Signal')
fig = plt.gcf()
ax = plt.gca()
blue_line = ax.lines[0]
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.05, 1.05])
plt.ylim([-1.05, 1.05])
plt.savefig('figure_1-0.svg')
# plot sub-sampled signal in time
buf = signal[0]
step = len(signal) // 32
y = buf[0:-1:step]
t = np.linspace(0, 1.0, len(signal))[0:-1:step]
red_lines = []
for tt, yy in zip(t, y):
l = plt.axvline(x = tt, color = 'red')
red_lines.append(l)
plt.savefig('figure_1-2.svg')
plt.plot(t, y, 'ro')
plt.savefig('figure_1-3.svg')
# remove blue line & red lines
blue_line.remove()
for l in red_lines:
l.remove()
    # draw lollipop
for tt, yy in zip(t, y):
plt.plot([tt, tt], [0, yy], 'b-', zorder = -1)
fig.canvas.draw()
plt.savefig('figure_1-4.svg')
#--------------------------------------------------------------------------
# figure 2
signal.plot('3 Hz Signal')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.05, 1.05])
plt.ylim([-1.05, 1.05])
plt.savefig('figure_2-0.svg')
# multiply the signal by a gaussian
s1 = signal * gen.drawGaussian(1.0, 0.33, 0.15)
s1.plot('3 Hz Signal * env')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.05, 1.05])
plt.ylim([-1.05, 1.05])
plt.savefig('figure_2-1.svg')
# multiply the signal by a gaussian
s2 = signal * gen.drawGaussian(1.0, 0.5, 0.15)
s2.plot('3 Hz Signal * env')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.05, 1.05])
plt.ylim([-1.05, 1.05])
plt.savefig('figure_2-2.svg')
# multiply the signal by a gaussian
s3 = signal * gen.drawGaussian(1.0, 0.66, 0.15)
s3.plot('3 Hz Signal * env')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.05, 1.05])
plt.ylim([-1.05, 1.05])
plt.savefig('figure_2-3.svg')
# multiply the signal by a gaussian
s4 = signal * (0.05 + gen.drawGaussian(1.0, 0.66, 0.15))
    s4.normalize()
s4.plot('3 Hz Signal & ???')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.05, 1.05])
plt.ylim([-1.05, 1.05])
plt.savefig('figure_2-4.svg')
#--------------------------------------------------------------------------
# figure 3
signal.plot('3 Hz Signal')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.15, 1.15])
plt.ylim([-1.15, 1.15])
plt.savefig('figure_3-0.svg')
# add red rectangle
cx = 0.5
cy = 0
w = 1.10
h = 2.10
xy = [cx - 0.5 * w, cy - 0.5 * h]
r = matplotlib.patches.Rectangle(
xy,
width = w,
height = h,
ec = 'red',
fc = 'none',
)
ax = plt.gca()
ax.add_patch(r)
plt.savefig('figure_3-0.svg')
# shrink rectangle
w *= 0.666
x = cx - 0.5 * w
r.set_x(x)
r.set_width(w)
ax.figure.canvas.draw()
plt.savefig('figure_3-1.svg')
# shrink rectangle
w *= 0.333
x = cx - 0.5 * w
r.set_x(x)
r.set_width(w)
ax.figure.canvas.draw()
plt.savefig('figure_3-2.svg')
#--------------------------------------------------------------------------
# figure 4
sig = signal
time_axis = np.linspace(0, 1.0, len(sig))
rec_dict = dict(xy = (0,-1), width=1, height=2, ec = 'red', fc = 'none')
freqs = [6, 4.5, 3.3, 3.1]
for i, f in enumerate(freqs):
sig2 = gen.drawSine(1.0, f)
tones = (sig + sig2) / 2.0
plt.figure()
plt.plot(time_axis, tones[0].toList())
plt.grid(True)
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.15, 1.15])
plt.ylim([-1.15, 1.15])
plt.title('3 Hz + %.1f Hz' % f)
r = matplotlib.patches.Rectangle(**rec_dict)
ax = plt.gca()
ax.add_patch(r)
plt.savefig('figure_4-%d.svg' % i)
plt.show()
if __name__ == "__main__":
main()
|
weegreenblobbie/sd_audio_hackers
|
20160626_spectrograms_explained/media/make_plots.py
|
Python
|
mit
| 4,732
|
[
"Gaussian"
] |
cee711ef6ef4f95c7d975c0a30b6fb39240092fb94116c74f8dea0721f6b8c1d
|
#!/usr/bin/python
"""Test of Orca bookmarks
"""
from macaroon.playback import *
import utils
sequence = MacroSequence()
########################################################################
# We wait for the focus to be on the Firefox window as well as for focus
# to move to the frame.
#
sequence.append(WaitForWindowActivate(utils.firefoxFrameNames, None))
########################################################################
# Load the simpleform.html page.
#
sequence.append(KeyComboAction("<Control>l"))
sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
sequence.append(TypeAction(utils.htmlURLPrefix + "simpleform.html"))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForDocLoad())
sequence.append(WaitForFocus("", acc_role=pyatspi.ROLE_DOCUMENT_FRAME))
########################################################################
# Tab to the first entry. The following will be presented.
#
# BRAILLE LINE: 'Type something here: $l'
# VISIBLE: 'Type something here: $l', cursor=23
# SPEECH OUTPUT: ''
# SPEECH OUTPUT: 'Type something here: text '
#
sequence.append(KeyComboAction("Tab"))
sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
########################################################################
# Set a bookmark at the entry. The following will be presented.
#
# SPEECH OUTPUT: 'entered bookmark'
# SPEECH OUTPUT: 'Type something here: text '
#
sequence.append(KeyPressAction (0, None, "Alt_L"))
sequence.append(KeyPressAction (0, None, "Insert"))
sequence.append(TypeAction ("1"))
sequence.append(KeyReleaseAction(150, None, "Insert"))
sequence.append(KeyReleaseAction(150, None, "Alt_L"))
########################################################################
# Tab to the first checkbox. Irrelevant output ignored.
#
sequence.append(KeyComboAction("Tab"))
sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
sequence.append(KeyComboAction("Tab"))
sequence.append(WaitForFocus("", acc_role=pyatspi.ROLE_PASSWORD_TEXT))
sequence.append(KeyComboAction("Tab"))
sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
sequence.append(KeyComboAction("Tab"))
sequence.append(WaitForFocus("Red", acc_role=pyatspi.ROLE_CHECK_BOX))
########################################################################
# Set a bookmark at the checkbox. The following will be presented.
#
# SPEECH OUTPUT: 'entered bookmark'
# SPEECH OUTPUT: 'Red check box not checked'
#
sequence.append(KeyPressAction (0, None, "Alt_L"))
sequence.append(KeyPressAction (0, None, "Insert"))
sequence.append(TypeAction ("2"))
sequence.append(KeyReleaseAction(150, None, "Insert"))
sequence.append(KeyReleaseAction(150, None, "Alt_L"))
########################################################################
# Go to bookmark number 1. The following will be presented.
#
# BRAILLE LINE: 'Type something here: $l'
# VISIBLE: 'Type something here: $l', cursor=23
# SPEECH OUTPUT: 'Type something here: text '
#
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("1"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
#sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
########################################################################
# Go to bookmark number 2. The following will be presented.
# Note: some unicode characters have been removed from Braille output.
#
# BRAILLE LINE: 'Check one or more: < > CheckBox Red < > CheckBox Blue < > CheckBox Green'
# VISIBLE: '? < > CheckBox Red < > CheckBox ', cursor=1
# SPEECH OUTPUT: 'Red check box not checked'
#
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("2"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
#sequence.append(WaitForFocus("Red", acc_role=pyatspi.ROLE_CHECK_BOX))
########################################################################
# Go to the next bookmark. The following will be presented.
#
# BRAILLE LINE: 'Type something here: $l'
# VISIBLE: 'Type something here: $l', cursor=23
# SPEECH OUTPUT: 'Type something here: text '
#
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("B"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
#sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
########################################################################
# Close the demo
#
sequence.append(KeyComboAction("<Control>l"))
sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
sequence.append(TypeAction("about:blank"))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForDocLoad())
# Just a little extra wait to let some events get through.
#
sequence.append(PauseAction(3000))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
h4ck3rm1k3/orca-sonar
|
test/keystrokes/firefox/orca_bookmarks.py
|
Python
|
lgpl-2.1
| 4,788
|
[
"ORCA"
] |
84309bcd389e1187ac3a31623640bbfea78af5e251bb4d4c9e20ed132347c57e
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import namedtuple
from scipy.optimize import curve_fit
from scipy import asarray as ar, exp
from matplotlib.pyplot import subplot2grid
from scipy.stats import linregress
import statsmodels.api as sm
"""
To save list of all parroquias:
regions = df.groupby(['provincia_nombre','canton_nombre','parroquia_nombre'])
keys = regions.indices.keys()
keys = sorted(keys)
with open('provincia_canton_parroquia_.dat','w') as f:
for key in keys:
f.write(';'.join(key) + '\n')
"""
show = True
save = True
df02a = '../data_prior/presidential_election_2002a.dta'
df02b = '../data_prior/presidential_election_2002b.dta'
df06a = '../data_prior/presidential_election_2006a.dta'
df06b = '../data_prior/presidential_election_2006b.dta'
df09 = '../data_prior/presidential_election_2009.dta'
df13 = '../data_prior/presidential_election_2013.dta'
## =============================================================================
def gauss(x,a,x0,sigma):
return a*exp(-(x-x0)**2/(2*sigma**2))
## =============================================================================
def fitGauss(data):
dx = 0.02
bins = np.arange(0, 1 + 2*dx, dx) - dx/2
histo = plt.hist(data, bins=bins, \
range=(0,1), color='red', label="somelabel")
# fit Gaussian to histogram
histX = histo[1][2:] - dx/2
    p0 = [100, 0.6, 0.5]  # initial guess: height / center / sigma
popt, pcov = curve_fit(gauss, histX, histo[0][1:], p0)
return popt
## =============================================================================
def plotGauss(data,popt):
if show:
print " height : ", popt[0], "\n" \
" center : ", popt[1], "\n" \
" width : ", popt[2], "\n"
    # plot the gaussian over the same bin grid used in fitGauss
    dx = 0.02
    bins = np.arange(0, 1 + 2*dx, dx) - dx/2
    gOut = gauss(bins, popt[0], popt[1], popt[2])
    line, = plt.plot(bins, gOut, 'k--', linewidth=3)
# prettify
plt.ylim(0,100)
plt.xlim(0,1)
plt.xticks(np.arange(0,1.1,0.1))
plt.title("Fraction of votes", size=24)
plt.xlabel("Fraction", size=18)
plt.ylabel("Number of precincts", size=18)
plt.legend()
plt.draw()
## =============================================================================
# http://stackoverflow.com/questions/19379295/linear-regression-with-pandas-dataframe
def fit_line1(x, y):
"""Return slope, intercept of best fit line."""
# Remove entries where either x or y is NaN.
clean_data = pd.concat([x, y], 1).dropna(0) # row-wise
(_, x), (_, y) = clean_data.iteritems()
slope, intercept, r, p, stderr = linregress(x, y)
return slope, intercept # could also return stderr
def fit_line2(x, y):
"""Return slope, intercept of best fit line."""
X = sm.add_constant(x)
    model = sm.OLS(y, X, missing='drop')  # ignores entries where x or y is NaN
fit = model.fit()
return fit.params[1], fit.params[0] # could also return stderr in each via fit.bse
def fit_line3(x, y, w):
"""Return slope, intercept of best fit line."""
X = sm.add_constant(x)
    model = sm.WLS(y, X, w, missing='drop')  # ignores entries where x or y is NaN
fit = model.fit()
return fit.params[1], fit.params[0] # could also return stderr in each via fit.bse
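# Hedged usage sketch: all three fitters return (slope, intercept); fit_line3
# additionally takes per-point weights, typically w = 1/err**2 with err from
# calc_unc() below.
#   m, b = fit_line1(df.x, df.y)            # scipy linregress
#   m, b = fit_line2(df.x, df.y)            # statsmodels OLS
#   m, b = fit_line3(df.x, df.y, 1/err**2)  # statsmodels WLS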
## =============================================================================
def calc_unc(ycount,tot):
"""
    for computing errors of some fraction, Z = ycount/total;
    any points with zero counts are set to 1 when computing the errorbar
    formula (assuming Poisson counting errors, dx = sqrt(X), dy = sqrt(Y)):
    Z = Y/X
    dz = Z * sqrt( (dx/X)**2 + (dy/Y)**2 )
       = Z * sqrt( 1/X + 1/Y )
    W = 1/dz**2
      = 1 / ( Z**2 * (1/X + 1/Y) )
"""
Y = ycount.copy()
Y[Y==0] = 1
return Y/tot * np.sqrt( 1./Y + 1./tot)
# return 1./dz**2
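# Hedged worked example for calc_unc: with ycount = 25 and tot = 100,
# Z = 25/100 = 0.25 and dz = 0.25 * sqrt(1/25 + 1/100) = 0.25 * sqrt(0.05),
# which is roughly 0.056.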
## =============================================================================
def getFracs(dat, codigo=None):
"""
dat is dataframe
returns dfrac, a dataframe grouped by precinct (id_provcantparr+sexo)
'valfrac' : valfrac.values, \
'votefrac' :votefrac.values, \
'nullfrac' : nulls.values * 1.0/tot, \ # voided ballots
'blankfrac' : blank.values * 1.0/tot, \
'nullcount' : nulls, \
'blankcount': blank, \
'total' : tot, \
"""
if 'precinct' in dat.columns:
indcol = 'precinct'
else:
indcol = 'id_provcantparr'
    # scalar value of registered voters, being a lazy programmer
tot = dat.groupby(indcol).electores.mean()
#
blank = dat.groupby(indcol).votos_en_blanco.mean()
nulls = dat.groupby(indcol).votos_nulos.mean()
# total valid votes across all candidates
valid = dat.groupby(indcol).candidato_votos.sum()
# fraction of voters that cast valid ballots
valfrac = valid / tot
if codigo:
        # if given a numeric candidate code
        win = dat.candidato_votos[dat.candidato_codigo==codigo]
else:
# extract votes for winner
win = dat.candidato_votos[dat.candidato_estado=='electos']
#
votefrac = win * 1.0 / valid.values
dfrac = pd.DataFrame( \
data={ 'valfrac' : valfrac.values, \
'votefrac' :votefrac.values, \
'nullfrac' : nulls.values * 1.0/tot, \
'blankfrac' : blank.values * 1.0/tot, \
'nullcount' : nulls, \
'blankcount': blank, \
'total' : tot, \
},\
index=valfrac.index)
return dfrac
## =============================================================================
def plotFinger(dfrac, path, df, candidato_codigo=None):
'''
Plot the election fingerprint.
Input:
dfrac -
path - string to filepath
Given list of frac DataFrames, extract data and plot 2d histogram
'''
# metadata
if candidato_codigo:
# plotted candidate assigned
code = candidato_codigo
else:
# winner code
code = df[df.candidato_estado=='electos'].candidato_codigo.values[0]
name = df[df.candidato_codigo==code].candidato_nombre.values[0]
name = name.split()[0]
if path:
yr = path.split('_')[-1]
yr = yr.split('.')[0]
leg = yr + ' ' + name
# plot histogram
ax0 = plt.subplot2grid( (4,3), (0,0), rowspan=3, colspan=3)
# http://matplotlib.org/examples/color/colormaps_reference.html
# perceptually-uniform are: viridis, inferno, plasma, magma
cmap = plt.get_cmap('plasma')
# plot with bins
histo = ax0.hist2d( \
dfrac.valfrac.values, dfrac.votefrac.values, \
bins=100, range=[[0,1],[0,1]], \
cmin=1, cmax=5, cmap=cmap, \
# label = "test label"
);
xticks = np.arange(0,1.1,0.1)
yticks = xticks
ax0.set_xticks(xticks)
ax0.set_yticks(yticks)
ax0.set_title("Election fingerprint, " + leg, size=20)
ax0.set_ylabel("Fraction voting for winner", size=16)
# cumulative subplot
ax1 = plt.subplot2grid( (4,3), (3,0), rowspan=1, colspan=3)
hvals = np.ma.array(histo[0], mask=np.isnan(histo[0]))
hsum = np.ma.sum(hvals,axis=1)
hsum = hsum.cumsum()
hsum = hsum / hsum.max()
ax1.plot( histo[1][1:], hsum, 'b-' )
ax1.axis( [0, 1, -0.1, 1.1 ])
ax1.plot( [0, 1], [0, 0], 'k--')
ax1.plot( [0, 1], [1, 1], 'k--')
ax1.set_xticks(xticks)
ax1.set_xlabel("Fraction of valid ballots", size=16)
if save:
plt.savefig('fingerprint_' + yr + '_' + name + '.png')
if show:
plt.show();
## =============================================================================
# def plotFinger(dfrac, path, df, candidato_codigo=None):
def plotBadBallots(dfrac):
xmax = 2 #0.25
xvals = np.arange(0, 2*xmax, xmax )
blankSlope, blankInt = fit_line1(dfrac.blankfrac, dfrac.votefrac)
nullSlope, nullInt = fit_line1(dfrac.nullfrac, dfrac.votefrac)
## errors
errNull = calc_unc( dfrac.nullcount, dfrac.total )
"""
The plot I prefer has nullfrac as a function of votefrac, meaning the errorbars
are along the x-axis. But sm.WLS assumes y-errors. So feed into fitline,
then invert from:
y = mx + b
to:
x = (y-b)/m
"""
nullm, nullb = fit_line3( dfrac.nullfrac, dfrac.votefrac, 1/errNull**2)
if False:
nullb = -1*nullb/nullm
nullm = 1.0/nullm
blankLine = blankInt + blankSlope * xvals
nullLine = nullb + nullm * xvals
# plt.plot(xvals, blankLine, 'b-')
plt.plot(xvals, nullLine, 'r-')
# blankScatt = plt.scatter( \
# dfrac.blankfrac, dfrac.votefrac, \
# s=50, \
# facecolors='none', \
# edgecolors='r', \
# );
nullScatt = plt.errorbar(
dfrac.votefrac, dfrac.nullfrac, yerr = errNull, \
fmt = 'ro', \
# s=50, \
# facecolors='none', \
# edgecolors='b', \
);
# plt.axis([0, xmax, 0, 1])
plt.show()
## =============================================================================
path = df09
df = pd.read_stata( path )
# remove crazy offset from candidate code
df.candidato_codigo = df.candidato_codigo - np.round(df.candidato_codigo,-2)
# use precinctcode + gender as index
df['precinct'] = df.id_provcantparr + df.sexo.astype(basestring)
print df.candidato_codigo.unique()
code = 0
dfrac = getFracs(df, code)
"""
Current status: trying to fit :
blank ballots vs votes for candidate,
null votes vs votes for candidate
a fair election shouldn't have any correlation for those.
the idea being to plot them on the right side of the fingerprint, so really it
will be the inverse of those: votes vs blank, votes vs nulls
sm.WLS assumes error is the y-axis error. So for this I'll need to fit them,
then invert the results from:
y = m*x + b
to:
x = (y-b)/m = 1/m*y - b/m
votes = blank/mfit - bfit/mfit
"""
#plotFinger( dfrac, path, df, code )
plotBadBallots(dfrac)
"""
need to make weighted regression on voteshare vs blanks (and nulls), to check if there's a systematic bias for/against a candidate.
"""
"""
33 columns:
dignidad_codigo race_code
dignidad_ambito race_level
dignidad_nombre race_name
provincia_codigo 1-27
provincia_nombre
candidato_codigo code (0-7) for each candidate (3 is Correa)
candidato_votos sum of votes in precint
candidato_estado electos / NaN
candidato_nombre
electores registered voters
numero_de_actas number of ballot boxes
canton_codigo 263 unique
canton_nombre
sexo
votos_en_blanco
votos_nulos
blank + invalid + valid + no-shows = electores
no-shows not recorded, inferred from math
op_provincia_codigo
op_canton_codigo
op_parroquia_codigo
op_tipo
op_ambito
op_nombre party name
op_siglas party initials
op_lista
op_codigo
op_votos_en_plancha NaN
parroquia_codigo 1248 unique
parroquia_nombre city? 1097 uniques
parroquia_estado city's state
id_prov 27
id_provcant 263 unique
id_provcantparr 1248 unique : concat state/city/precinct
id_opcodigo 8 unique : ['00048', '00095', '00005', '00056', '00119', '00100', '00076', '00075']
"""
|
lucidjuvenal/quis-custodiet
|
stats/ecdata.py
|
Python
|
gpl-3.0
| 10,335
|
[
"Gaussian"
] |
7e473e0cf823bf1db00f07a8eebac2d27ad6ed946bd0a1a39d3cc053ec5d18b4
|
"""Components for loading and unloading data using `Storm`_.
See :ref:`Using LoadableFixture<using-loadable-fixture>` for examples.
.. _Storm: https://storm.canonical.com/
"""
from fixture.loadable import DBLoadableFixture
from fixture.util import _mklog
stlog = _mklog('fixture.loadable.storm')
class StormMedium(DBLoadableFixture.StorageMediumAdapter):
def clear(self, obj):
self.transaction.remove(obj)
def save(self, row, column_vals):
from storm.info import get_cls_info
from storm.locals import ReferenceSet, Store
cls_info = get_cls_info(self.medium)
column_vals = list(column_vals)
pk = []
for n, v in column_vals:
propid = id(getattr(self.medium, n))
if propid in cls_info.primary_key_idx:
pk.append((cls_info.primary_key_idx[propid], v, n))
assert len(pk) == 0 or len(pk) == len(cls_info.primary_key), (
"Incomplete primary key see %s need %s" % (
[x[2] for x in pk], [x.name for x in cls_info.primary_key]))
if pk:
obj = self.transaction.get(self.medium, tuple([x[1] for x in sorted(pk)]))
else:
obj = None
if obj is None:
obj = self.medium()
self.transaction.add(obj)
assert Store.of(obj) is self.transaction
for n, v in column_vals:
if isinstance(getattr(self.medium,n), ReferenceSet):
getattr(obj, n).add(v)
else:
setattr(obj, n, v)
self.transaction.flush()
stlog.info("%s %s", obj, [(n,getattr(obj,n)) for n in row.columns()])
return obj
def visit_loader(self, loader):
"""Visit the loader and store a reference to the transaction connection"""
self.transaction = loader.transaction
class StormFixture(DBLoadableFixture):
StormMedium = StormMedium
Medium = StormMedium
def __init__(self, store=None, use_transaction=True,
close_store=False, **kw ):
DBLoadableFixture.__init__(self, **kw)
self.store = store
self.close_store = close_store
self.use_transaction = use_transaction
def create_transaction(self):
return self.store
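# Hedged usage sketch (assumed fixture/Storm wiring, not part of this module):
#   from storm.locals import create_database, Store
#   store = Store(create_database('sqlite:'))
#   dbfixture = StormFixture(store=store, env=globals())
#   data = dbfixture.data(SomeDataSet)   # DataSet subclasses defined elsewhere
#   data.setup()
#   ...                                  # run assertions against the store
#   data.teardown()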
|
patrickod/fixture
|
fixture/loadable/storm_loadable.py
|
Python
|
lgpl-2.1
| 2,295
|
[
"VisIt"
] |
eec62a19d7233f799800f9cf3c230f8479cb5ea0d7b55de73780a8276a4cecf0
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
******************************************
**espresso.interaction.DihedralPotential**
******************************************
"""
# -*- coding: iso-8859-1 -*-
from espresso import pmi
from espresso import toReal3DFromVector
from _espresso import interaction_DihedralPotential
# Python base class for dihedral potentials
class DihedralPotentialLocal(object):
def computeEnergy(self, *args):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if len(args) == 1:
arg0 = args[0]
if isinstance(arg0, float) or isinstance(arg0, int):
return self.cxxclass.computeEnergy(self, arg0)
return self.cxxclass.computeEnergy(self, toReal3DFromVector(*args))
def computeForce(self, *args):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if len(args) == 1: # in case theta is passed
arg0 = args[0]
if isinstance(arg0, float) or isinstance(arg0, int):
return self.cxxclass.computeForce(self, arg0)
return self.cxxclass.computeForce(self, toReal3DFromVector(*args))
if pmi.isController:
class DihedralPotential(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
localcall = [ 'computeForce', 'computeEnergy' ],
pmiproperty = [ 'cutoff' ]
)
|
BackupTheBerlios/espressopp
|
src/interaction/DihedralPotential.py
|
Python
|
gpl-3.0
| 2,349
|
[
"ESPResSo"
] |
cc3d69ba9c954a0e57f3bebb7d86126c3b234b42ce6d83040929327a439df447
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test suite for sloth.raytracing - shadow part"""
import os
import sys
import math
import unittest
from sloth.utils.bragg import bragg_ev
from sloth.raytracing.shadow_utils import plot_energy_histo, plot_footprint, plot_image
HAS_SHADOW = False
try:
import Shadow
Shadow.ShadowTools.plt.ion()
HAS_SHADOW = True
except ImportError:
print("WARNING: {0}\n => this test will probably fail!".format(sys.exc_info()[1]))
pass
CURDIR = os.path.dirname(os.path.realpath(__file__))
DATA = os.path.join(CURDIR, "testdata")
# human-like test (TODO: move to unittest)
def sbca_si555(
nrays=500000,
rmirr=50.0,
theta0=75.0,
cone_max=0.11,
f_color=3,
ph1=10229.0,
ph2=10237,
iwrite=0,
f_angle=0,
run=True,
):
"""Si(555) SBCA using flat crystal reflectivity
.. *NOTE*: units given in cm
Parameters
==========
rmirr : bending radius of surface [50.]
theta0 : Bragg angle in deg [75.]
f_color : photon energy distribution type (1=single energy, 3=uniform energy
distribution) [3]
    ph1, ph2 : energy_start, energy_end [10229.0, 10237]
iwrite : write start/end/begin/star files (0=No, 1=Yes) [0]
f_angle : write angle.XX (0=No, 1=Yes) [0]
"""
if HAS_SHADOW is False:
print("ERROR: Shadow not found")
return (None, None, None)
p = rmirr * math.sin(math.radians(theta0))
si_d111 = 3.13562683
ene0 = bragg_ev(theta0, si_d111 / 5)
if (f_color == 1) and (ph1 is None):
ph1 = ene0
ph2 = ene0
if (f_color == 3) and (ph1 is None):
ph1 = ene0 - 0.1
ph2 = ene0 + 0.1
# initialize shadow3 source (src) and beam
beam = Shadow.Beam()
src = Shadow.Source()
oe = Shadow.OE()
# Define variables. See meaning of variables in:
# https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml
src.CONE_MAX = cone_max
src.FDISTR = 5
src.FSOUR = 2 # elliptical
src.FSOURCE_DEPTH = 0
src.F_COLOR = f_color
src.F_PHOT = 0 # eV
src.NPOINT = nrays
src.PH1 = ph1
src.PH2 = ph2
src.WXSOU = 0.08
src.WZSOU = 0.01
# https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml
oe.DUMMY = 1.0
oe.FHIT_C = 1
oe.FILE_REFL = bytes(os.path.join(DATA, "Si555.dat"), "utf8")
oe.FMIRR = 1
oe.FSHAPE = 2
oe.FWRITE = 1
oe.F_ANGLE = f_angle
oe.F_CRYSTAL = 1
oe.F_EXT = 1
oe.RLEN1 = 5.0
oe.RLEN2 = 5.0
oe.RMIRR = rmirr
oe.RWIDX1 = 5.0
oe.RWIDX2 = 5.0
oe.T_IMAGE = p
oe.T_INCIDENCE = 90.0 - theta0
oe.T_REFLECTION = 90.0 - theta0
oe.T_SOURCE = p
if run:
# Run SHADOW to create the source
if iwrite:
src.write("start.00")
beam.genSource(src)
if iwrite:
src.write("end.00")
beam.write("begin.dat")
# Run optical element 1
# print("INFO: running optical element: %d"%(1))
if iwrite:
oe.write("start.01")
beam.traceOE(oe, 1)
if iwrite:
oe.write("end.01")
beam.write("star.01")
print("INFO: Si(555) SBCA, R = {0:.0f} cm, theta0 = {1:.3f}".format(rmirr, theta0))
print("INFO: => p[q] = {0:.4f} cm , ene0 = {1:.3f}".format(p, ene0))
return (beam, src, oe)
def run_test_sbca_si555():
if HAS_SHADOW is False:
print("ERROR: Shadow not found")
return None
print("MANUAL TEST FOR CHECKING IF SHADOW WORKS CORRECTLY")
print(
"it is recommended to run this in an empty directory as it generates temporary files"
)
beam, src, oe = sbca_si555(
nrays=500000,
rmirr=50.0,
theta0=82.0,
cone_max=0.11,
ph1=9981,
ph2=9984,
iwrite=0,
)
plot_footprint()
plot_image(beam)
plot_energy_histo(beam)
# unittest ###
class TestShadow(unittest.TestCase):
def test_shadow(self):
self.assertTrue(HAS_SHADOW)
def suite():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestShadow))
return test_suite
if __name__ == "__main__":
if 0:
# unittest
unittest.main(defaultTest="suite")
if 1:
# manual test
run_test_sbca_si555()
|
maurov/xraysloth
|
sloth/raytracing/test/test_shadow.py
|
Python
|
bsd-3-clause
| 4,353
|
[
"CRYSTAL"
] |
7ef92c6ef87f3c20a43ddd7fffab8ca6c2a642b63296327026e49216eaf7670c
|
# -*- coding: utf-8 -*-
import subprocess as sb
import sys
import logging
from distutils.dir_util import copy_tree
import os
from tqdm import tqdm
logging.basicConfig(level=logging.INFO,
format='%(levelname)-5s @ %(asctime)s:\n\t %(message)s \n',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
error = logging.critical
warn = logging.warning
debug = logging.debug
info = logging.info
__version__ = "0.5.5"
HAYSTACK_VERSION = __version__
def check_file(filename):
try:
with open(filename): pass
except IOError:
error('I cannot open the file:'+filename)
sys.exit(1)
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def check_required_packages():
if which('bedtools') is None:
error('Haystack requires bedtools.'
' Please install using bioconda')
sys.exit(1)
    if which('bedGraphToBigWig') is None:
        error('Haystack requires bedGraphToBigWig. Please install.')
        sys.exit(1)
    if which('sambamba') is None:
        error('Haystack requires sambamba. Please install.')
        sys.exit(1)
    if which('bigWigAverageOverBed') is None:
        error('Haystack requires bigWigAverageOverBed. Please install.')
        sys.exit(1)
    if which('meme') is None:
        error('Haystack requires meme. Please install.')
        sys.exit(1)
    if which('java') is None:
        error('Haystack requires java. Please install.')
        sys.exit(1)
def determine_path(folder=''):
_ROOT = os.path.abspath(os.path.dirname(__file__))
_ROOT = os.path.join(_ROOT,'haystack_data')
    debug(_ROOT)
# if os.environ.has_key('CONDA_PREFIX'): #we check if we are in an conda env
# #_ROOT = '%s/haystack_data' % os.environ['HOME']
# _ROOT=os.environ['CONDA_PREFIX']
# else:
# _ROOT =which('python').replace( '/bin/python', '') #we are in the main conda env
#
# _ROOT=os.path.join(_ROOT,'share/haystack_data')
return os.path.join(_ROOT,folder)
def run_testdata():
test_data_dir= determine_path("test_data")
os.chdir(test_data_dir)
cmd= "haystack_pipeline samples_names.txt hg19 --output_directory $HOME/haystack_test_output --blacklist hg19 --chrom_exclude 'chr(?!21)'"
try:
info("running test")
sb.call(cmd, shell=True)
info("Test completed successfully")
except:
error("Cannot run test")
def copy_haystack_data():
info("copying data")
data_root = determine_path()
d_path = lambda x: (x, os.path.join(data_root, x))
try:
copy_tree(*d_path('test_data'))
copy_tree(*d_path('extra'))
copy_tree(*d_path('genomes'))
copy_tree(*d_path('gene_annotations'))
copy_tree(*d_path('motif_databases'))
        debug(os.listdir(data_root))
    except:
        error("Cannot copy data")
#taken from tqdm website
class TqdmUpTo(tqdm):
"""Provides `update_to(n)` which uses `tqdm.update(delta_n)`."""
def update_to(self, b=1, bsize=1, tsize=None):
"""
b : int, optional
Number of blocks transferred so far [default: 1].
bsize : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n) # will also set self.n = b * bsize
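# Hedged usage note: a TqdmUpTo instance is typically passed as the urllib
# reporthook, exactly as done in initialize_genome() below:
#   with TqdmUpTo(unit='B', unit_scale=True) as t:
#       urllib.urlretrieve(url, filename=dest, reporthook=t.update_to)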
def check_md5sum(genome_filename, genome_name):
import hashlib
def hash_bytestr_iter(bytesiter, hasher, ashexstr=True):
# taken from https://stackoverflow.com/a/3431835
for block in bytesiter:
hasher.update(block)
return (hasher.hexdigest() if ashexstr else hasher.digest())
def file_as_blockiter(afile, blocksize=65536):
# taken from https://stackoverflow.com/a/3431835
with afile:
block = afile.read(blocksize)
while len(block) > 0:
yield block
block = afile.read(blocksize)
md5_dic = {"hg38": "dcc3ea27079aa6dc3f9deccd7275e0f8",
"hg19": "bcdbfbe9da62f19bee88b74dabef8cd3",
"hg18": "05e8d31e39545273914397ad6204448e",
"mm10": "fcfcc276799031793a513e2e9c07adad",
"mm9": "e47354d24b9d95e832c337d42b9f8f71",
"ce10": "3d0bab4bc255fc5b3276a476e13d230c",
"sacCer3": "880201a7d1ec95c0185b0b4783c80411",
"sacCer2": "ed3b980b89a22f7d869091bee874d4b5",
"dm6": "62f44f8cbf76c78ce923cb6d87559963",
"dm3": "4ec509b470010829be44ed8e7bfd7f57"}
if genome_name in md5_dic.keys():
md5_source = md5_dic[genome_name]
md5_hash_returned = hash_bytestr_iter(file_as_blockiter(open(genome_filename, 'rb')),
hashlib.md5())
check_flag = (md5_source == md5_hash_returned)
if check_flag:
info("MD5 verification Succeeded!")
else:
info("MD5 verification failed!.")
else:
info( 'Cannot verify MD5 sum. The MD5 hash for %s is not found in the internal saved list' %genome_name )
info(' '.join(md5_dic.keys()))
check_flag= True
return check_flag
def initialize_genome(genome_name):
from bioutilities import Genome_2bit
import urllib
info('Initializing Genome:%s' % genome_name)
genome_directory = determine_path('genomes')
info('genome_directory: %s' % genome_directory)
genome_filename = os.path.join(genome_directory, "%s.2bit" % genome_name)
chr_len_filename = os.path.join(genome_directory, "%s_chr_lengths.txt" % genome_name)
meme_bg_filename = os.path.join(genome_directory, "%s_meme_bg" % genome_name)
download_genome = True
if os.path.exists(genome_filename):
try:
Genome_2bit(genome_filename, verbose=True)
md5_check_flag = check_md5sum(genome_filename, genome_name)
if md5_check_flag:
download_genome = False
info('File %s exists. Skipping genome download' % genome_filename)
else:
download_genome = True
except:
download_genome = True
error("Unable to check MD5 sum. Downloading genome.")
if download_genome:
info('Sorry I need the genome file to perform the analysis. Downloading...')
urlpath = "http://hgdownload.cse.ucsc.edu/goldenPath/%s/bigZips/%s.2bit" % (genome_name, genome_name)
info('Downloading %s in %s...' % (urlpath, genome_filename))
try:
with TqdmUpTo(unit='B', unit_scale=True, mininterval=30, miniters=1, desc=urlpath.split('/')[-1]) as t:
urllib.urlretrieve(urlpath,
filename=genome_filename,
reporthook=t.update_to,
data=None)
info('Downloaded %s in %s:' % (urlpath, genome_filename))
except IOError, e:
error("Can't retrieve %r to %r: %s" % (urlpath, genome_filename, e))
info('Sorry I need the genome file to perform the analysis. Exiting...')
sys.exit(1)
check_file(genome_filename)
genome = Genome_2bit(genome_filename, verbose=True)
if not os.path.exists(chr_len_filename):
info('Extracting chromosome lengths')
genome.write_chr_len(chr_len_filename)
info('Done!')
else:
info('File %s exists, skipping generation' % chr_len_filename)
if not os.path.exists(meme_bg_filename):
info('Calculating nucleotide frequencies....')
genome.write_meme_background(meme_bg_filename)
info('Done!')
else:
info('File %s exists, skipping generation' % meme_bg_filename)
check_file(chr_len_filename)
check_file(meme_bg_filename)
info('Sorting chromosome lengths file....')
cmd = ' sort -k1,1 -k2,2n "%s" -o "%s" ' % (chr_len_filename,
chr_len_filename)
sb.call(cmd, shell=True)
return genome, chr_len_filename, meme_bg_filename
|
pinellolab/haystack_bio
|
haystack/haystack_common.py
|
Python
|
agpl-3.0
| 9,089
|
[
"Bioconda"
] |
df4e9bce7ad30b7f6213058eb84de0fdd2327609673e9eca696432fa3363212a
|
from rdkit import RDConfig
import gzip
import os.path
import pickle
from rdkit import Chem
from rdkit.Chem import Crippen
Crippen._Init()
def runIt(inFileName, outFileName, smiCol=0, maxMols=-1, delim=','):
    inF = gzip.open(inFileName, 'rt')  # text mode so lines are str (needed on Python 3)
outF = open(outFileName, 'wb+')
mols = []
nDone = 0
for line in inF.readlines():
if line[0] != '#':
splitL = line.strip().split(delim)
smi = splitL[smiCol].strip()
print(smi)
mol = Chem.MolFromSmiles(smi)
if mol:
contribs = Crippen._GetAtomContribs(mol)
pickle.dump((smi, contribs), outF)
nDone += 1
if maxMols > 0 and nDone >= maxMols:
break
outF.close()
if __name__ == '__main__':
inFileName = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'buildingblocks.smi.gz')
outFileName = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data',
'Crippen_contribs_regress.2.pkl')
runIt(inFileName, outFileName, smiCol=1, delim='\t', maxMols=500)
|
ptosco/rdkit
|
rdkit/Chem/test_data/BuildCrippenTestSet.py
|
Python
|
bsd-3-clause
| 1,005
|
[
"RDKit"
] |
5a069915d9a367c0430d97edd6ab9d6df5054948966feec2a8a2830aefa29a12
|
import os
import cPickle as pickle
__all__ = ["Experiment", "experiments", "series", "filterInfo", "the"]
# os.walk from Python 2.6, which adds support for followlinks.
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
from os.path import join, getsize
for root, dirs, files in walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = os.listdir(top)
except os.error as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not islink(path):
for x in walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
def maybeInt(s):
if s.isdigit():
return int(s)
return s
def naturalSort(l):
l.sort(key = lambda d: map(maybeInt, d.split("-")))
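# e.g. naturalSort(["run-10", "run-2"]) compares the numeric components as
# ints, giving ["run-2", "run-10"] rather than lexicographic order.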
class Experiment(object):
def __init__(self, path):
self.__path = path
self.__info = None
def __repr__(self):
return "Experiment(%r)" % self.__path
@property
def path(self):
return self.__path
@property
def info(self):
if self.__info is None:
# Old style
p = os.path.join(self.__path, "config")
if not os.path.exists(p):
# New style
p = os.path.join(self.__path, "info")
self.__info = pickle.load(file(p))
return self.__info
def openLog(self, cfgDict):
return file(os.path.join(self.__path, "log", cfgDict["name"]))
def experiments(*dirs):
"""Generate a sequence of Experiment's under dirs, sorted in a
reasonable way."""
for d in dirs:
for (dirpath, dirnames, filenames) in walk(d, followlinks=True):
if "info" in filenames or "config" in filenames:
dirnames[:] = []
yield Experiment(dirpath)
else:
naturalSort(dirnames)
def series(*dirs):
"""Find all experiment series under dirs. Generates a sequence of
(name, [Experiment]) pairs for each series where name is some
reasonable path-based identifier and [Experiment] is a list of
Experiment's in this series."""
# Find each series under all directories in dirs. Keep them in
# some reasonable order.
pdirMap = {} # series dir -> [point dir]
order = []
for top in dirs:
for (dirpath, dirnames, filenames) in walk(top, followlinks=True):
if "info" in filenames or "config" in filenames:
dirnames[:] = []
# dirpath contains the data point and its parent
# contains the series
sdir = os.path.dirname(dirpath) + "/"
if sdir not in pdirMap:
pdirMap[sdir] = []
order.append(sdir)
pdirMap[sdir].append(os.path.abspath(dirpath))
else:
naturalSort(dirnames)
# Get the common prefix of the series directories
common = os.path.commonprefix(order)
if not common.endswith("/"):
# commonprefix works character-by-character and we got part of
# a path. Trim off the partial match.
common = os.path.dirname(common) + "/"
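# e.g. commonprefix(["/exp/run-1/", "/exp/run-2/"]) yields "/exp/run-";
# dirname plus the trailing "/" turns that back into the directory "/exp/".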
# Yield each series
for sdir in order:
yield sdir[len(common):].rstrip("/"), map(Experiment, pdirMap[sdir])
def filterInfo(info, **selectors):
"""Filter the information list, returning only the information
dictionaries that match all of the items in selectors. The
'className' selector is treated specially to deal with subclassing
relations."""
className = selectors.pop("className", None)
for dct in info:
if className is not None:
if "classNames" in dct:
if className not in dct["classNames"]:
continue
else:
raise ValueError("No class name for %r" % dct)
for k, v in selectors.iteritems():
if k not in dct or dct[k] != v:
break
else:
yield dct
def the(it):
"""If the given iterator produces just one unique value, return
it. Otherwise, raise ValueError."""
first = True
out = None
for v in it:
if first:
out = v
first = False
else:
if out != v:
raise ValueError("More than one value")
if first:
raise ValueError("Empty sequence")
return out
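# e.g. the(iter([3, 3, 3])) returns 3, while the(iter([1, 2])) and
# the(iter([])) both raise ValueError.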
|
KMU-embedded/mosbench-ext
|
mparts/analyze.py
|
Python
|
mit
| 7,691
|
[
"VisIt"
] |
5b3b969667847575f66517699d87c5b3f7ea41fa54b623c39b0f4c3ae86bd45b
|
import os
import nest
import logging
import datetime
from time import clock
from parameters import *
from data import *
times = []
spikegenerators = {} # dict name_part : spikegenerator
spikedetectors = {} # dict name_part : spikedetector
multimeters = {} # dict name_part : multimeter
SYNAPSES = 0
FORMAT = '%(name)s.%(levelname)s: %(message)s.'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
logger = logging.getLogger('function')
def generate_neurons():
global NEURONS, all_parts
logger.debug("* * * Start generate neurons")
#parts_no_dopa =
#parts_with_dopa =
all_parts = tuple(sorted(parts_no_dopa + parts_with_dopa))
if test_flag:
# TEST NUMBER
enterial[enterial_GABA0][k_NN] = 1000
enterial[enterial_GABA1][k_NN] = 3000
enterial[enterial_Glu][k_NN] = 2000
enterial[enterial_ACh0][k_NN] = 620
enterial[enterial_ACh1][k_NN] = 1200
enterial[enterial_NA][k_NN] = 3020
enterial[enterial_DA0][k_NN] = 4080
enterial[enterial_DA1][k_NN] = 6010
enterial[enterial_DA2][k_NN] = 9000
dentate[dentate_GABA][k_NN] = 2000
dentate[dentate_Glu][k_NN] = 1000
dentate[dentate_ACh][k_NN] = 3000
dentate[dentate_NA][k_NN] = 3000
dentate[dentate_DA][k_NN] = 1240
dentate[dentate_SE][k_NN] = 3000
CA3[CA3_GABA0][k_NN] = 1000
CA3[CA3_GABA1][k_NN] = 1500
CA3[CA3_Glu][k_NN] = 2000
CA3[CA3_ACh0][k_NN] = 4500
CA3[CA3_ACh1][k_NN] = 2000
CA3[CA3_NA][k_NN] = 1000
CA3[CA3_DA][k_NN] = 1000
CA3[CA3_SE][k_NN] = 4000
CA1[CA1_GABA][k_NN] = 1000
CA1[CA1_Glu][k_NN] = 2500
CA1[CA1_ACh][k_NN] = 1000
CA1[CA1_DA][k_NN] = 3000
sub[sub_GABA][k_NN] = 2500
sub[sub_Glu][k_NN] = 2200
else:
# REAL NUMBER
enterial_cortex_II_NN = 110000 #DG and CA3 total neurons
enterial_cortex_III_NN = 250000 #CA1 total neurons
enterial_cortex_V_NN = 330000
enterial_cortex_NN = enterial_cortex_II_NN + enterial_cortex_III_NN + enterial_cortex_V_NN #total 690000
enterial[enterial_GABA0][k_NN] = 1000 #DG
enterial[enterial_GABA1][k_NN] = 3000 #CA1
enterial[enterial_Glu][k_NN] = 2000 #DG
enterial[enterial_ACh0][k_NN] = 620 #DG
enterial[enterial_ACh1][k_NN] = 1200 #CA1
enterial[enterial_NA][k_NN] = 3020 #CA1
enterial[enterial_DA0][k_NN] = 4080 #DG
enterial[enterial_DA1][k_NN] = 6010 #CA3
enterial[enterial_DA2][k_NN] = 9000 #CA1
dentate_NN = 1200000
dentate[dentate_GABA][k_NN] = 2000 #CA3
dentate[dentate_Glu][k_NN] = 1000 #CA3
dentate[dentate_ACh][k_NN] = 3000 #CA3
dentate[dentate_DA][k_NN] = 1240 #CA3
dentate[dentate_SE][k_NN] = 3000 #CA3
CA3_NN = 250000
CA3[CA3_GABA0][k_NN] = 1000 #CA3
CA3[CA3_GABA1][k_NN] = 1500 #CA1
CA3[CA3_Glu][k_NN] = 2000 #CA1
CA3[CA3_ACh0][k_NN] = 4500 #CA3
CA3[CA3_ACh1][k_NN] = 2000 #CA1
CA3[CA3_DA][k_NN] = 1000 #CA1
CA3[CA3_SE][k_NN] = 4000 #CA1
CA1_NN = 390000
CA1[CA1_GABA][k_NN] = 1000 #Subiculum
CA1[CA1_Glu][k_NN] = 2500 #Subiculum
CA1[CA1_ACh][k_NN] = 1000 #EC
CA1[CA1_DA][k_NN] = 3000 #Subiculum
subiculum_NN = 290000
sub[sub_GABA][k_NN] = 2500 #EC
sub[sub_Glu][k_NN] = 2200 #EC
for part in all_parts:
part[k_NN] = NN_minimal if int(part[k_NN] * NN_coef) < NN_minimal else int(part[k_NN] * NN_coef)
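# i.e. part[k_NN] = max(NN_minimal, int(part[k_NN] * NN_coef)): scale each
# population by NN_coef but never shrink it below NN_minimal neurons.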
NEURONS = sum(item[k_NN] for item in all_parts)
logger.debug('Initialised: {0} neurons'.format(NEURONS))
# assign neuron params to every part
nest.SetDefaults('iaf_psc_exp', iaf_neuronparams)
nest.SetDefaults('iaf_psc_alpha', iaf_neuronparams)
# without dopamine
for part in parts_no_dopa:
part[k_model] = 'iaf_psc_exp'
# with dopamine
for part in parts_with_dopa:
part[k_model] = 'iaf_psc_alpha'
for part in all_parts:
part[k_IDs] = nest.Create(part[k_model], part[k_NN])
logger.debug("{0} [{1}, {2}] {3} neurons".format(part[k_name], part[k_IDs][0],
part[k_IDs][0] + part[k_NN] - 1,
part[k_NN]))
def log_connection(pre, post, syn_type, weight):
global SYNAPSES
SYNAPSES += pre[k_NN] * post[k_NN]
logger.debug("{0} -> {1} ({2}) w[{3}] // {4} synapses".format(pre[k_name], post[k_name],
syn_type, weight, pre[k_NN] * post[k_NN]))
def connect(pre, post, syn_type=GABA, weight_coef=1):
synapses[syn_type][0]['weight'] = weight_coef * synapses[syn_type][1]
nest.Connect(pre[k_IDs],
post[k_IDs],
conn_spec=conn_dict,
syn_spec=synapses[syn_type][3] if syn_type in (DA_ex, DA_in) else synapses[syn_type][0])
log_connection(pre, post, synapses[syn_type][2], synapses[syn_type][0]['weight'])
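# Assumed layout of the `synapses` table from parameters.py, inferred from
# the indexing above: synapses[syn_type] == (syn_spec_dict, base_weight,
# readable_name, dopamine_syn_spec).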
def connect_generator(part, startTime=1, stopTime=T, rate=250, coef_part=1):
name = part[k_name]
spikegenerators[name] = nest.Create('poisson_generator', 1, {'rate': float(rate),
'start': float(startTime),
'stop': float(stopTime)})
nest.Connect(spikegenerators[name], part[k_IDs],
syn_spec=static_syn,
conn_spec={'rule': 'fixed_outdegree',
'outdegree': int(part[k_NN] * coef_part)})
logger.debug("Generator => {0}. Element #{1}".format(name, spikegenerators[name][0]))
def connect_detector(part):
name = part[k_name]
number = part[k_NN] if part[k_NN] < N_detect else N_detect
spikedetectors[name] = nest.Create('spike_detector', params=detector_param)
nest.Connect(part[k_IDs][:number], spikedetectors[name])
logger.debug("Detector => {0}. Tracing {1} neurons".format(name, number))
def connect_multimeter(part):
name = part[k_name]
multimeters[name] = nest.Create('multimeter', params=multimeter_param) # ToDo add count of multimeters
nest.Connect(multimeters[name], (part[k_IDs][:N_volt]))
logger.debug("Multimeter => {0}. On {1}".format(name, part[k_IDs][:N_volt]))
def f_name_gen(path, name):
"""Generate the full file name of an image."""
return "{0}{1}_{2}_dopamine_{3}.png".format(path, name, 'yes' if dopa_flag else 'no',
'noise' if generator_flag else 'static')
def simulate():
global startsimulate, endsimulate
begin = 0
nest.PrintNetwork()
logger.debug('* * * Simulating')
startsimulate = datetime.datetime.now()
for t in np.arange(0, T, dt):
print "SIMULATING [{0}, {1}]".format(t, t + dt)
nest.Simulate(dt)
end = clock()
times.append("{0:10.1f} {1:8.1f} {2:10.1f} {3:4.1f} {4}\n".format(begin, end - begin, end,
t, datetime.datetime.now().time()))
begin = end
print "COMPLETED {0}%\n".format(t/dt)
endsimulate = datetime.datetime.now()
logger.debug('* * * Simulation completed successfully')
def get_log(startbuild, endbuild):
logger.info("Number of neurons : {}".format(NEURONS))
logger.info("Number of synapses : {}".format(SYNAPSES))
logger.info("Building time : {}".format(endbuild - startbuild))
logger.info("Simulation time : {}".format(endsimulate - startsimulate))
for key in spikedetectors:  # FIXME: the '1000.0 / N_detect' normalisation is wrong when a part traces fewer than N_detect neurons
print "***************"
print nest.GetStatus(spikedetectors[key])[0]
logger.info("{0:>18} rate: {1:.2f}Hz".format(key, nest.GetStatus(spikedetectors[key], 'n_events')[0] / T * 1000.0 / N_detect))
logger.info("Dopamine : {}".format('YES' if dopa_flag else 'NO'))
logger.info("Noise : {}".format('YES' if generator_flag else 'NO'))
def save(GUI):
global txtResultPath
SAVE_PATH = "/Users/komarovvitaliy/Desktop/testH/results/output-{0}/".format(NEURONS)
if GUI:
import pylab as pl
import nest.raster_plot
import nest.voltage_trace
logger.debug("Saving IMAGES into {0}".format(SAVE_PATH))
if not os.path.exists(SAVE_PATH):
os.mkdir(SAVE_PATH)
for key in spikedetectors:
try:
nest.raster_plot.from_device(spikedetectors[key], hist=True)
pl.savefig(f_name_gen(SAVE_PATH, "spikes_" + key.lower()), dpi=dpi_n, format='png')
pl.close()
except Exception:
print(" * * * from {0} is NOTHING".format(key))
for key in multimeters:
try:
nest.voltage_trace.from_device(multimeters[key])
pl.savefig(f_name_gen(SAVE_PATH, "volt_" + key.lower()), dpi=dpi_n, format='png')
pl.close()
except Exception:
print(" * * * from {0} is NOTHING".format(key))
txtResultPath = SAVE_PATH + 'txt/'
logger.debug("Saving TEXT into {0}".format(txtResultPath))
if not os.path.exists(txtResultPath):
os.mkdir(txtResultPath)
for key in spikedetectors:
save_spikes(spikedetectors[key], name=key) #, hist=True)
#for key in multimeters:
# save_voltage(multimeters[key], name=key)
with open(txtResultPath + 'timeSimulation.txt', 'w') as f:
for item in times:
f.write(item)
from collections import defaultdict
GlobalDICT = {}
def save_spikes(detec, name, hist=False):
title = "Raster plot from device '%i'" % detec[0]
ev = nest.GetStatus(detec, "events")[0]
ts = ev["times"]
gids = ev["senders"]
data = defaultdict(list)
temp_dict = {}
if len(ts):
with open("{0}@spikes_{1}.txt".format(txtResultPath, name), 'w') as f:
f.write("Name: {0}, Title: {1}, Hist: {2}\n".format(name, title, "True" if hist else "False"))
for num in range(0, len(ev["times"])):
data[round(ts[num], 1)].append(gids[num])
for key in sorted(data.iterkeys()):
f.write("{0:>5} {1:>4} : {2}\n".format(key, len(data[key]), sorted(data[key])))
temp_dict[key] = len(data[key])
else:
print "Spikes in {0} is NULL".format(name)
result_list = []
'''
if len(ts):
for i in np.arange(0, int(min(key for key in temp_dict)), 1):
result_list.append(0)
for i in np.arange(int(min(key for key in temp_dict)), int(max(key for key in temp_dict)) + 1, 1):
result_list.append( sum(temp_dict[key] for key in temp_dict if int(key) == i) )
for i in np.arange(max(key for key in temp_dict) + 1, int(T), 1):
result_list.append(0)
else:
for i in np.arange(0, T, 1):
result_list.append(0)
GlobalDICT[name] = result_list
'''
def save_voltage(detec, name):
title = "Membrane potential"
ev = nest.GetStatus(detec, "events")[0]
with open("{0}@voltage_{1}.txt".format(txtResultPath, name), 'w') as f:
f.write("Name: {0}, Title: {1}\n".format(name, title))
print int(T / multimeter_param['interval'])
for line in range(0, int(T / multimeter_param['interval'])):
for index in range(0, N_volt):
print "{0} {1} ".format(ev["times"][line], ev["V_m"][line])
#f.write("\n")
print "\n"
# TODO: use params={'spike_times': np.arange(1, T, 20.)} in the generator
def testUnit():
import matplotlib.pyplot as plt
data = [ GlobalDICT[key][:999] for key in GlobalDICT ]
plt.xlabel("Time in ms")
plt.ylabel("Part of brain")
plt.imshow(data, aspect='auto', interpolation='none', cmap="hot")
plt.show()
|
vitaliykomarov/NEUCOGAR
|
nest/GDP/scripts/func.py
|
Python
|
gpl-2.0
| 12,382
|
[
"NEURON"
] |
d89ceacdf4871995ecf1aab1e2bb52d1a13693f3c88c2c9300f1dc2200ef0696
|
"""
Acquisition functions based on the probability or expected value of
improvement.
"""
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
# global imports
import numpy as np
import scipy.stats as ss
# local imports
from ..utils import params
# exported symbols
__all__ = ['EI', 'PI']
def _integrate(index, models):
"""
Helper method which integrates the given index function over the given
models. Here `models` can be any iterable object where each element
returned by the iterator could have been passed to the index object itself.
"""
def index2(X, grad=False):
indices = [index(X, grad, model) for model in models]
if grad:
return tuple([np.sum(_, axis=0) for _ in zip(*indices)])
else:
return np.sum(indices, axis=0)
return index2
@params('xi')
def EI(model, xi=0.0):
"""
Expected improvement policy with an exploration parameter of `xi`.
"""
X, _ = model.data
f, _ = model.posterior(X)
target = f.max() + xi
# define the index wrt a single model (that should act like a GP model, ie
# in that it is marginally Gaussian and defines the posterior method).
def index(X, grad=False, model=model):
posterior = model.posterior(X, grad=grad)
mu, s2 = posterior[:2]
s = np.sqrt(s2)
d = mu - target
z = d / s
pdfz = ss.norm.pdf(z)
cdfz = ss.norm.cdf(z)
ei = d * cdfz + s * pdfz
if grad:
# get the derivative of ei. The mu/s2/etc. components are vectors
# collecting n scalar points, whereas dmu and ds2 are (n,d)-arrays.
# The indexing tricks just interpret the "scalar" quantities as
# (n,1)-arrays so that we can use numpy's broadcasting rules.
dmu, ds2 = posterior[2:]
dei = 0.5 * ds2 / s2[:, None]
dei *= (ei - s * z * cdfz)[:, None]
dei += cdfz[:, None] * dmu
return ei, dei
else:
return ei
if hasattr(model, '__iter__'):
return _integrate(index, model)
else:
return index
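# Usage sketch (hypothetical model object exposing the data/posterior API
# assumed above):
#
#   index = EI(model, xi=0.1)
#   ei = index(Xtest)                  # acquisition values at Xtest
#   ei, dei = index(Xtest, grad=True)  # values plus gradients
#
# If `model` is an iterable of models, the returned index instead sums the
# per-model values via _integrate.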
@params('xi')
def PI(model, xi=0.05):
"""
Probability of improvement policy with an exploration parameter of `xi`.
"""
X, _ = model.data
f, _ = model.posterior(X)
target = f.max() + xi
def index(X, grad=False, model=model):
posterior = model.posterior(X, grad=grad)
mu, s2 = posterior[:2]
s = np.sqrt(s2)
d = mu - target
z = d / s
cdfz = ss.norm.cdf(z)
if grad:
# get the derivative of pi. The mu/s2/etc. components are vectors
# collecting n scalar points, whereas dmu and ds2 are (n,d)-arrays.
# The indexing tricks just interpret the "scalar" quantities as
# (n,1)-arrays so that we can use numpy's broadcasting rules.
dmu, ds2 = posterior[2:]
dz = dmu / s[:, None] - 0.5 * ds2 * z[:, None] / s2[:, None]
pdfz = ss.norm.pdf(z)
return cdfz, dz * pdfz[:, None]
else:
return cdfz
if hasattr(model, '__iter__'):
return _integrate(index, model)
else:
return index
|
jhartford/pybo
|
pybo/bayesopt/policies/improvement.py
|
Python
|
bsd-2-clause
| 3,291
|
[
"Gaussian"
] |
80723b9659dc6cbde6fb1ca87e156c969b9fe7621302c6467b7fc723382f4065
|
#!/usr/bin/env python
from pylab import *
from math import exp, sqrt
from image import *
from filters import *
from nibabel import load
def exp2d():
R = 50
I = circle_image3(300, 300, 1, R)
S = []
R = []
kernel_radius = array([2, 2, 0])
for sigma in linspace(1, 11, 5):
I1 = add_gaussian_noise(I, sigma)
gaussian_kernel_matrix = gaussian_kernel(kernel_radius, sigma)
#print "filtering gaussian with kernel_radius %s" % kernel_radius
J = filter_image_kernel(I1, gaussian_kernel_matrix)  # filter the noisy image I1 (it was otherwise unused)
similarity = image_similarity(I, J)
regularity = image_regularity(J[:, :, 0])
S.append(similarity)
R.append(regularity)
print "Finished sigma %f - (S,R)= (%f,%f)" % (sigma, similarity, regularity)
subplot(311)
imshow(I[:, :, 0])
subplot(312)
plot(S)
subplot(313)
plot(R)
show()
def exp_bilateral():
data = load("../report/img/300/T1w_acpc_dc_restore_1.25.nii.gz")
i = data.get_data()
i = i[:, :, 50]
i = rescale_grayscale_image(i)
#i = circle_image(300, 300, 50)
save_image_png(i, 'original')
sigma_noise = 2
i = add_gaussian_noise(i, sigma_noise)
save_image_png(i, 'noise_' + str(sigma_noise))
#i = rescale_grayscale_image(i).astype(float32)
sigma = 1
j = gaussian_filter(i, sigma)
save_image_png(j, 'gaussian_%.2f' % sigma)
sigmaD, sigmaR = (1, 200)
k = bilateral_filter(i, sigmaD, sigmaR)
save_image_png(k, 'bilateral_d%.2f,_r%.2f' % (sigmaD, sigmaR))
## gray()
##
## imshow(i)
## mng = plt.get_current_fig_manager()
## mng.window.state('zoomed')
##
## figure()
## imshow(j)
## mng = plt.get_current_fig_manager()
## mng.window.state('zoomed')
##
## figure()
## imshow(k)
## mng = plt.get_current_fig_manager()
## mng.window.state('zoomed')
##
## show()
def mm_to_voxel(mm, M):
''' M: streching coefficients '''
return M.dot(mm)
def kernel_for_sigma(s):
return (s * 3).astype(int)
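# A radius of 3*sigma covers ~99.7% of the mass of a Gaussian kernel, so
# truncating the kernel there loses essentially nothing.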
def streching_coefficients_mm(im):
a = im.get_affine()
return np.absolute(a[:3, :3])
def exp_images():
data = load("data/T1w_acpc_dc_restore_1.25.nii.gz")
affine = abs(data.get_affine()[:3, :3])
sizes = array([0.5, 0.5, 0.5]) * 4
kernel_size = kernel_for_sigma(mm_to_voxel(sizes, affine))
kernel_size = array([2, 2, 2])
sigmaD_gaussian = 0.35
sigmaD = 2
sigmaR = 0.2
ims = data.get_data()[50:100, 50:100, :50]
#ims=circle_image3(50,50,50,12.5)
kernel_function = bilateral_kernel(kernel_size, sigmaD, sigmaR)
print "filtering bilateral with kernel size %s" % kernel_size
i_bilateral = filter_image_function(ims, kernel_function, kernel_size)
print "done filtering with kernel size %s" % kernel_size
gaussian_kernel_matrix = gaussian_kernel(kernel_size, sigmaD_gaussian)
print "filtering gaussian with kernel size %s" % kernel_size
i_gaussian = filter_image_kernel(ims, gaussian_kernel_matrix)
print "done filtering gaussian with kernel size %s" % kernel_size
ss, rs, ms = (image_similarity(ims, ims), image_regularity(ims), ims.mean())
sb, rb, mb = (image_similarity(ims, i_bilateral), image_regularity(i_bilateral), i_bilateral.mean())
sg, rg, mg = (image_similarity(ims, i_gaussian), image_regularity(i_gaussian), i_gaussian.mean())
print "Self Similarity: %f / reg: %0.2f / mean: %f" % (ss, rs, ms)
print "Similarity to bilateral: %f / reg: %0.2f / mean: %0.2f " % (sb, rb, mb)
print "Similarity to gaussian: %f / reg: %0.2f / mean: %0.2f" % (sg, rb, mg)
image_to_show = 25
text_pos = 7
topo = 220
subplot(topo + 1)
imshow(ims[:, :, image_to_show])
colorbar()
title("Original")
text(text_pos, text_pos, "$reg=%0.2f$ \n $\mu=%0.2f$" % (rs, ms))
title("Original")
text(text_pos, text_pos, "$reg=%0.2f$ \n $\mu=%0.2f$" % (rs, ms))
subplot(topo + 2)
imshow(i_bilateral[:, :, image_to_show])
colorbar()
title("Bilateral:\n $\mathcal{R}$=%s,$\sigma_d$=%.2f,$\sigma_r$=%.2f" % (str(kernel_size), sigmaD, sigmaR))
text(text_pos, text_pos, "$sim=%0.2f,reg=%0.2f$\n $\mu=%0.2f$" % (sb, rb, mb))
subplot(topo + 3)
imshow(i_gaussian[:, :, image_to_show])
colorbar()
title("Gaussian:\n $\mathcal{R}$=%s,$\sigma_d$=%.2f" % (str(kernel_size), sigmaD_gaussian))
text(text_pos, text_pos, "$sim=%0.2f,reg=%0.2f$\n $\mu=%0.2f$" % (sg, rg, mg))
print image_regularity(ims)
print image_regularity3(ims)
show()
if __name__ == "__main__":
set_printoptions(precision=4, linewidth=150, suppress=True)
#exp_images()
exp_bilateral()
#exp2d()
|
facundoq/ipim
|
tp1/py/test_bilateral.py
|
Python
|
gpl-3.0
| 4,633
|
[
"Gaussian"
] |
e5094aa5f12eafb0a46cad826cb175de039cbd4f12036fd9d67bc115008d7cdc
|
# Copyright (C) 2011-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
from __future__ import print_function
import sys
import math
import unittest as ut
import numpy as np
import espressomd
import espressomd.electrokinetics
import espressomd.shapes
import ek_common
##########################################################################
# Set up the System #
##########################################################################
# Set up the slit pore geometry. The width is the non-periodic part of the
# geometry; the padding ensures that there is no field outside the slit.
@ut.skipIf(not espressomd.has_features(["ELECTROKINETICS", "EK_BOUNDARIES"]),
"Features not available, skipping test!")
class ek_eof_one_species_x(ut.TestCase):
es = espressomd.System(box_l=[1.0, 1.0, 1.0])
es.seed = es.cell_system.get_state()['n_nodes'] * [1234]
def test(self):
system = self.es
pi = math.pi
box_z = 4
box_y = 4
width = 32
padding = 6
box_x = width + 2 * padding
# Set the electrokinetic parameters
agrid = 0.5
dt = 1.0 / 5.0
force = 0.13
sigma = -0.03
viscosity_kinematic = 1.0
friction = 1.0
temperature = 2.3
bjerrum_length = 0.7
temperature_LB = agrid * agrid / (3.0 * dt * dt)
kB_LB = 1.0
cs_squared = (1.0 / 3.0) * (agrid * agrid / (dt * dt))
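# Lattice-Boltzmann speed of sound: c_s^2 = (1/3) * (agrid / dt)^2.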
system.box_l = [box_x, box_y, box_z]
# Set the simulation parameters
system.time_step = dt
system.cell_system.skin = 0.1
system.thermostat.turn_off()
integration_length = 1500
# Set up the charged and neutral species
density_water = 26.15
density_counterions = -2.0 * float(sigma) / float(width)
valency = 1.0
# Set up the (LB) electrokinetics fluid
ek = espressomd.electrokinetics.Electrokinetics(
agrid=agrid,
lb_density=density_water,
viscosity=viscosity_kinematic,
friction=friction,
T=temperature,
prefactor=bjerrum_length * temperature,
stencil="nonlinear")
counterions = espressomd.electrokinetics.Species(
density=density_counterions,
D=0.3,
valency=valency,
ext_force_density=[0, force, 0])
ek.add_species(counterions)
# Set up the walls confining the fluid and carrying charge
ek_wall1 = espressomd.ekboundaries.EKBoundary(
charge_density=sigma / (padding),
shape=espressomd.shapes.Wall(normal=[1, 0, 0], dist=padding))
system.ekboundaries.add(ek_wall1)
ek_wall2 = espressomd.ekboundaries.EKBoundary(
charge_density=sigma / (padding), shape=espressomd.shapes.Wall(normal=[-1, 0, 0], dist=-(padding + width)))
system.ekboundaries.add(ek_wall2)
system.actors.add(ek)
# Integrate the system
system.integrator.run(integration_length)
# compare the various quantities to the analytic results
total_velocity_difference = 0.0
total_density_difference = 0.0
total_pressure_difference_xx = 0.0
total_pressure_difference_yy = 0.0
total_pressure_difference_zz = 0.0
total_pressure_difference_xy = 0.0
total_pressure_difference_yz = 0.0
total_pressure_difference_xz = 0.0
# initial parameters for bisection scheme
size = pi / (2.0 * width)
pnt0 = 0.0
pntm = pnt0 + size
pnt1 = pnt0 + 1.9 * size
# the bisection scheme
tol = 1.0e-08
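# Bisection: find the root xi of ek_common.solve(x, ...) bracketed by
# [pnt0, pnt1]; each pass halves `size` and moves the midpoint pntm until
# size drops below tol.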
while (size > tol):
val0 = ek_common.solve(pnt0, width, bjerrum_length, sigma, valency)
val1 = ek_common.solve(pnt1, width, bjerrum_length, sigma, valency)
valm = ek_common.solve(pntm, width, bjerrum_length, sigma, valency)
if (val0 < 0.0 and val1 > 0.0):
if (valm < 0.0):
pnt0 = pntm
size = size / 2.0
pntm = pnt0 + size
else:
pnt1 = pntm
size = size / 2.0
pntm = pnt1 - size
elif (val0 > 0.0 and val1 < 0.0):
if (valm < 0.0):
pnt1 = pntm
size = size / 2.0
pntm = pnt1 - size
else:
pnt0 = pntm
size = size / 2.0
pntm = pnt0 + size
else:
sys.exit(
"Bisection method fails:\nTuning of domain boundaries may be required.")
# obtain the desired xi value
xi = pntm
for i in range(int(box_x / agrid)):
if (i * agrid >= padding and i * agrid < box_x - padding):
xvalue = i * agrid - padding
position = i * agrid - padding - width / 2.0 + agrid / 2.0
# density
measured_density = counterions[i, int(
box_y / (2 * agrid)), int(box_z / (2 * agrid))].density
calculated_density = ek_common.density(
position, xi, bjerrum_length)
density_difference = abs(measured_density - calculated_density)
total_density_difference = total_density_difference + \
density_difference
# velocity
measured_velocity = ek[i, int(
box_y / (2 * agrid)), int(box_z / (2 * agrid))].velocity[1]
calculated_velocity = ek_common.velocity(
position,
xi,
width,
bjerrum_length,
force,
viscosity_kinematic,
density_water)
velocity_difference = abs(
measured_velocity - calculated_velocity)
total_velocity_difference = total_velocity_difference + \
velocity_difference
# diagonal pressure tensor
measured_pressure_xx = ek[i, int(
box_y / (2 * agrid)), int(box_z / (2 * agrid))].pressure[(0, 0)]
calculated_pressure_xx = ek_common.hydrostatic_pressure_non_lin(
ek, position, xi, bjerrum_length, (0, 0), box_x, box_y, box_z, agrid, temperature)
measured_pressure_yy = ek[i, int(
box_y / (2 * agrid)), int(box_z / (2 * agrid))].pressure[(1, 1)]
calculated_pressure_yy = ek_common.hydrostatic_pressure_non_lin(
ek, position, xi, bjerrum_length, (1, 1), box_x, box_y, box_z, agrid, temperature)
measured_pressure_zz = ek[i, int(
box_y / (2 * agrid)), int(box_z / (2 * agrid))].pressure[(2, 2)]
calculated_pressure_zz = ek_common.hydrostatic_pressure_non_lin(
ek, position, xi, bjerrum_length, (2, 2), box_x, box_y, box_z, agrid, temperature)
pressure_difference_xx = abs(
measured_pressure_xx - calculated_pressure_xx)
pressure_difference_yy = abs(
measured_pressure_yy - calculated_pressure_yy)
pressure_difference_zz = abs(
measured_pressure_zz - calculated_pressure_zz)
total_pressure_difference_xx = total_pressure_difference_xx + \
pressure_difference_xx
total_pressure_difference_yy = total_pressure_difference_yy + \
pressure_difference_yy
total_pressure_difference_zz = total_pressure_difference_zz + \
pressure_difference_zz
# xy component pressure tensor
measured_pressure_xy = ek[i, int(
box_y / (2 * agrid)), int(box_z / (2 * agrid))].pressure[(0, 1)]
calculated_pressure_xy = ek_common.pressure_tensor_offdiagonal(
position, xi, bjerrum_length, force)
pressure_difference_xy = abs(
measured_pressure_xy - calculated_pressure_xy)
total_pressure_difference_xy = total_pressure_difference_xy + \
pressure_difference_xy
# yz component pressure tensor
measured_pressure_yz = ek[i, int(
box_y / (2 * agrid)), int(box_z / (2 * agrid))].pressure[(1, 2)]
calculated_pressure_yz = 0.0
pressure_difference_yz = abs(
measured_pressure_yz - calculated_pressure_yz)
total_pressure_difference_yz = total_pressure_difference_yz + \
pressure_difference_yz
# xz component pressure tensor
measured_pressure_xz = ek[i, int(
box_y / (2 * agrid)), int(box_z / (2 * agrid))].pressure[(0, 2)]
calculated_pressure_xz = 0.0
pressure_difference_xz = abs(
measured_pressure_xz - calculated_pressure_xz)
total_pressure_difference_xz = total_pressure_difference_xz + \
pressure_difference_xz
total_density_difference = agrid * total_density_difference / width
total_velocity_difference = agrid * total_velocity_difference / width
total_pressure_difference_xx = agrid * \
total_pressure_difference_xx / width
total_pressure_difference_yy = agrid * \
total_pressure_difference_yy / width
total_pressure_difference_zz = agrid * \
total_pressure_difference_zz / width
total_pressure_difference_xy = agrid * \
total_pressure_difference_xy / width
total_pressure_difference_yz = agrid * \
total_pressure_difference_yz / width
total_pressure_difference_xz = agrid * \
total_pressure_difference_xz / width
self.assertLess(total_density_difference, 1.0e-04,
"Density accuracy not achieved")
self.assertLess(total_velocity_difference, 1.0e-04,
"Velocity accuracy not achieved")
self.assertLess(total_pressure_difference_xx, 1.0e-04,
"Pressure accuracy xx component not achieved")
self.assertLess(total_pressure_difference_yy, 1.0e-04,
"Pressure accuracy yy component not achieved")
self.assertLess(total_pressure_difference_zz, 1.0e-04,
"Pressure accuracy zz component not achieved")
self.assertLess(total_pressure_difference_xy, 1.0e-04,
"Pressure accuracy xy component not achieved")
self.assertLess(total_pressure_difference_yz, 1.0e-04,
"Pressure accuracy yz component not achieved")
self.assertLess(total_pressure_difference_xz, 1.0e-04,
"Pressure accuracy xz component not achieved")
if __name__ == "__main__":
ut.main()
|
hmenke/espresso
|
testsuite/python/ek_eof_one_species_y_nonlinear.py
|
Python
|
gpl-3.0
| 11,836
|
[
"ESPResSo"
] |
e82f217bd8064f479a56901ac97dc0c4189a8ea273873e94c57f2e62a8498e0d
|
"""
test_GlobalOperations.py
This file is part of ANNarchy.
Copyright (C) 2021 Helge Uelo Dinkelbach <helge.dinkelbach@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ANNarchy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
import numpy
from ANNarchy import Neuron, Population, Network, Monitor
class test_GlobalOps_1D(unittest.TestCase):
"""
ANNarchy supports several global operations, which are always applied to
variables of *Population* objects. Currently the following methods
are supported:
* mean()
* max()
* min()
* norm1()
* norm2()
They are used in the equations of our neuron definition.
This particular test focuses on a one-dimensional *Population*.
"""
@classmethod
def setUpClass(self):
"""
Compile the network for this test
"""
neuron = Neuron(
parameters="""
r=0
""",
equations="""
mean_r = mean(r)
max_r = max(r)
min_r = min(r)
l1 = norm1(r)
l2 = norm2(r)
"""
)
pop = Population(6, neuron)
self.test_net = Network()
self.test_net.add([pop])
self.test_net.compile(silent=True)
self.net_pop = self.test_net.get(pop)
@classmethod
def tearDownClass(cls):
del cls.test_net
def setUp(self):
"""
In our *setUp()* function we set the variable *r*.
We also call *simulate()* to calculate mean/max/min.
"""
# reset() sets all variables to their init value (default 0), which is
# meaningless for mean/max/min, so we assign some more useful values
# here
self.net_pop.r = [2.0, 1.0, 0.0, -5.0, -3.0, -1.0]
# 1st step: calculate mean/max/min and store in intermediate
# variables
# 2nd step: write intermediate variables to accessible variables.
self.test_net.simulate(2)
def tearDown(self):
"""
After each test we call *reset()* to reset the network.
"""
self.test_net.reset()
def test_get_mean_r(self):
"""
Tests the result of *mean(r)* for *pop*.
"""
self.assertTrue( numpy.allclose( self.net_pop.mean_r, -1.0 ) )
def test_get_max_r(self):
"""
Tests the result of *max(r)* for *pop*.
"""
self.assertTrue( numpy.allclose( self.net_pop.max_r, 2.0) )
def test_get_min_r(self):
"""
Tests the result of *min(r)* for *pop*.
"""
self.assertTrue( numpy.allclose( self.net_pop.min_r, -5.0) )
def test_get_l1_norm(self):
"""
Tests the result of *norm1(r)* (L1 norm) for *pop*.
"""
self.assertTrue(numpy.allclose( self.net_pop.l1, 12.0))
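# hand check: |2| + |1| + |0| + |-5| + |-3| + |-1| = 12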
def test_get_l2_norm(self):
"""
Tests the result of *norm2(r)* (L2 norm) for *pop*.
"""
# compute control value
l2norm = numpy.linalg.norm( self.net_pop.r, ord=2)
# test
self.assertTrue(numpy.allclose( self.net_pop.l2, l2norm))
class test_GlobalOps_1D_Large(unittest.TestCase):
"""
Same global operations as above, tested on a larger one-dimensional
*Population* filled with random values.
"""
@classmethod
def setUpClass(self):
"""
Compile the network for this test
"""
neuron = Neuron(
parameters="""
r=0
""",
equations="""
mean_r = mean(r)
max_r = max(r)
min_r = min(r)
l1 = norm1(r)
l2 = norm2(r)
"""
)
pop = Population(500, neuron)
self.test_net = Network()
self.test_net.add([pop])
self.test_net.compile(silent=True)
self.net_pop = self.test_net.get(pop)
@classmethod
def tearDownClass(cls):
del cls.test_net
def tearDown(self):
"""
After each test we call *reset()* to reset the network.
"""
self.test_net.reset()
def test_mean_r(self):
"""
"""
rand_val = numpy.random.random(500)
self.net_pop.r = rand_val
self.test_net.simulate(2)
self.assertTrue(numpy.allclose(self.net_pop.mean_r, numpy.mean(rand_val)))
def test_min_r(self):
"""
"""
rand_val = numpy.random.random(500)
self.net_pop.r = rand_val
self.test_net.simulate(2)
self.assertTrue(numpy.allclose(self.net_pop.min_r, numpy.amin(rand_val)))
def test_max_r(self):
"""
"""
rand_val = numpy.random.random(500)
self.net_pop.r = rand_val
self.test_net.simulate(2)
self.assertTrue(numpy.allclose(self.net_pop.max_r, numpy.amax(rand_val)))
class test_GlobalOps_2D(unittest.TestCase):
"""
ANNarchy supports several global operations, which are always applied to
variables of *Population* objects. Currently the following methods
are supported:
* mean()
* max()
* min()
* norm1()
* norm2()
They are used in the equations of our neuron definition.
This particular test focuses on a two-dimensional *Population*.
"""
@classmethod
def setUpClass(self):
"""
Compile the network for this test
"""
neuron = Neuron(
parameters="""
r=0
""",
equations="""
mean_r = mean(r)
max_r = max(r)
min_r = min(r)
l1 = norm1(r)
l2 = norm2(r)
"""
)
pop = Population((2, 3), neuron)
self.test_net = Network()
self.test_net.add([pop])
self.test_net.compile(silent=True)
self.net_pop = self.test_net.get(pop)
@classmethod
def tearDownClass(cls):
del cls.test_net
def setUp(self):
"""
In our *setUp()* function we set the variable *r*.
We also call *simulate()* to calculate mean/max/min.
"""
# reset() sets all variables to their init value (default 0), which is
# meaningless for mean/max/min, so we assign some more useful values
# here
self.net_pop.r = [[ 2.0, 1.0, 0.0],
[-5.0, -3.0, -1.0]]
# 1st step: calculate mean/max/min and store in intermediate
# variables
# 2nd step: write intermediate variables to accessible variables.
self.test_net.simulate(2)
def tearDown(self):
"""
After each test we call *reset()* to reset the network.
"""
self.test_net.reset()
def test_get_mean_r(self):
"""
Tests the result of *mean(r)* for *pop*.
"""
self.assertTrue( numpy.allclose( self.net_pop.mean_r, -1.0 ) )
def test_get_max_r(self):
"""
Tests the result of *max(r)* for *pop*.
"""
self.assertTrue( numpy.allclose( self.net_pop.max_r, 2.0) )
def test_get_min_r(self):
"""
Tests the result of *min(r)* for *pop*.
"""
self.assertTrue( numpy.allclose( self.net_pop.min_r, -5.0) )
def test_get_l1_norm(self):
"""
Tests the result of *norm1(r)* for *pop*.
"""
self.assertTrue(numpy.allclose( self.net_pop.l1, 12.0))
|
vitay/ANNarchy
|
tests/Unittests/test_GlobalOperations.py
|
Python
|
gpl-2.0
| 7,962
|
[
"NEURON"
] |
17e93d397eb3c89368e4320db230da06a6752c52dba7eb41138c08550e77b25b
|
from __future__ import print_function
import getdist.plots as plots
import matplotlib.pyplot as plt
import numpy as np
from getdist.gaussian_mixtures import Mixture2D, Mixture1D, Gaussian1D, Gaussian2D, make_2D_Cov
default_nsamp = 10000
def simFiles(prob, file_root, sample_lengths=[1000, 2000, 5000, 10000, 20000, 50000, 100000], text=True):
for nn in sample_lengths:
samples = prob.MCSamples(nn, logLikes=True)
if text:
samples.saveAsText(file_root + '_' + str(nn))
else:
samples.savePickle(file_root + '.py_mcsamples')
def compareSimPlot2D(g, samples, density, pars=['x', 'y']):
g.plot_2d(samples, pars)
density.normalize('max')
levels = density.getContourLevels(contours=[0.68, 0.95])
g.add_2d_density_contours(density, filled=False, color='red', contour_levels=levels)
levels = density.getContourLevels(contours=[0.2, 0.4, 0.6, 0.8])
g.add_2d_density_contours(density, filled=False, color='magenta', alpha=0.5, contour_levels=levels)
def compareSimPlot(g, samples, density, par='x'):
g.plot_1d(samples, par)
density.normalize('max')
plt.plot(density.x, density.P, color='r')
def plot1DSim(g, prob, nsamp=default_nsamp, settings={}):
samps = prob.MCSamples(nsamp, settings=settings)
compareSimPlot(g, samps, prob.density1D())
def plot2DSim(g, prob, nsamp=default_nsamp, settings={}):
samps = prob.MCSamples(nsamp, settings=settings)
compareSimPlot2D(g, samps, prob.density2D())
def compare1D(g, probs, nsamp=default_nsamp, settings={}):
samples = []
for i, prob in enumerate(probs):
samps = prob.MCSamples(nsamp, settings=settings)
samples.append(samps)
g.make_figure(len(probs))
for i, (samps, prob) in enumerate(zip(samples, probs)):
g._subplot_number(i)
compareSimPlot(g, samps, prob.density1D())
g.add_text_left(prob.label, y=0.95)
plt.subplots_adjust()
def compare2D(g, probs, nsamp=default_nsamp, settings={}):
samples = []
for i, prob in enumerate(probs):
samps = prob.MCSamples(nsamp, settings=settings)
samples.append(samps)
g.make_figure(len(probs))
for i, (samps, prob) in enumerate(zip(samples, probs)):
g._subplot_number(i)
compareSimPlot2D(g, samps, prob.density2D())
g.add_text_left(prob.label, y=0.95)
plt.subplots_adjust()
def get2DMises(prob, nsamp=default_nsamp, nsim=20, scales=np.arange(0.6, 1.5, 0.1), settings={}):
Mises = np.zeros(np.asarray(scales).size)
for _ in range(nsim):
samps = prob.MCSamples(nsamp, settings=settings)
for i, scale in enumerate(scales):
density = samps.get2DDensity('x', 'y', smooth_scale_2D=-scale)
density.normalize()
if i == 0:
xgrid, ygrid = np.meshgrid(density.x, density.y)
mean = prob.pdf(xgrid, ygrid)
mean /= density.integrate(mean)
Mises[i] += np.sum((mean - density.P) ** 2) / np.sum(mean ** 2)
Mises /= (nsim - 1)
return scales, Mises
def get1DMises(prob, nsamp=default_nsamp, nsim=50, scales=np.arange(0.6, 1.5, 0.1), settings={}):  # match get2DMises: a range of scales, not a 3-element list
Mises = np.zeros(np.asarray(scales).size)
for _ in range(nsim):
samps = prob.MCSamples(nsamp, settings=settings)
for i, scale in enumerate(scales):
density = samps.get1DDensity('x', smooth_scale_1D=-scale)
density.normalize()
if i == 0:
mean = prob.pdf(density.x)
if prob.lims is not None:
mean /= density.integrate(mean)
Mises[i] += np.sum((mean - density.P) ** 2) / np.sum(mean ** 2)
Mises /= (nsim - 1)
return scales, Mises
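# Both Mises helpers above estimate a normalised integrated squared error
# between the analytic density and the KDE, accumulated over nsim sample
# sets and divided by (nsim - 1):
#   ISE(scale) ~ sum((p_true - p_hat)**2) / sum(p_true**2)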
class Test1DDistributions(object):
def __init__(self):
self.gauss = Gaussian1D(0, 0.5, label='Gaussian')
self.skew = Mixture1D([0, 1], [1, 0.4], [0.6, 0.4], label='skew')
self.tailed = Mixture1D([0, 0], [1, 3], [0.8, 0.2], label='tailed')
self.flat = Gaussian1D(0, 3, xmin=-1, xmax=2, label='flat')
self.broad = Mixture1D([0, 0.3], [1, 2], [0.6, 0.4], label='broad')
self.flat_top = Mixture1D([0, 1.5, 3], [1, 1, 1], [0.4, 0.2, 0.4], label='flat top')
self.bimodal = []
self.bimodal.append(Mixture1D([0, 2], [0.5, 0.5], [0.6, 0.4], label='bimodal 1'))
self.bimodal.append(Mixture1D([0, 2], [0.2, 0.5], [0.5, 0.5], label='bimodal 2'))
self.trimodal = []
self.trimodal.append(Mixture1D([0, 2, 5], [0.2, 0.7, 0.4], label='trimodal'))
self.cut_gaussians = self.cutGaussians()
self.shape_set = [self.gauss, self.skew, self.tailed, self.broad, self.flat, self.flat_top]
self.all = self.shape_set + self.bimodal + self.trimodal + self.cut_gaussians
def cutGaussians(self, sigma=1, cut_x=[-1.5, -1, -0.5, 0, 1, 1.5]):
return [Gaussian1D(0, sigma, xmin=cut, label=r'Gaussian [$x>%s$]' % cut) for cut in cut_x]
def distributions(self):
return self.all
class Test2DDistributions(object):
def __init__(self):
self.gauss = Gaussian2D([0, 0], (0.7, 1, 0.3), label='Gaussian')
self.bending = Mixture2D([[0, 0], [2, 1.8]], [(np.sqrt(0.5), 1, 0.9), (1, 1, 0.8)], [0.6, 0.4], xmin=-1, label='bending')
self.hammer = Mixture2D([[0, 0], [1, 1.8]], [(np.sqrt(0.5), 1, 0.9), (0.3, 1, -0.7)], [0.5, 0.5], label='hammer')
cov = make_2D_Cov(np.sqrt(0.5), 1, 0.1)
self.skew = Mixture2D([[0, 0], [0, 1.2]], [cov, cov / 4], [0.5, 0.5], label='skew')
cov = make_2D_Cov(np.sqrt(0.5), 1, 0.1)
self.broadtail = Mixture2D([[0, 0], [0, 0.2]], [cov, cov * 8], [0.9, 0.1], label='broad tail')
self.tensorlike = Mixture2D([[0, 0.03], [0, 0.03]], [(0.03, 0.03, 0.1), (0.03, 0.06, 0.1)], [0.85, 0.15], ymin=0, label='tensor like')
self.rotating = Mixture2D([[0, 0], [0, 0.2]], [(1, 1, 0.5), (2, 2, -0.5)], [0.6, 0.4], label='rotating')
self.tight = Mixture2D([[0, 0], [2.5, 3.5]], [(1, 1, 0.99), (1, 1.5, 0.98)], [0.6, 0.4], label='tight')
self.cut_correlated = Gaussian2D([0, 0], (0.7, 1, 0.95), ymin=0.3, xmax=1.2, label='cut correlated')
self.shape_set = [self.gauss, self.bending, self.hammer, self.skew, self.broadtail, self.rotating, self.tight,
self.cut_correlated, self.tensorlike]
self.cut_gaussians = self.cutGaussians((0.7, 1, 0.3))
# these examples are from Wand and Jones 93
self.bimodal = []
self.bimodal.append(Mixture2D([[-1, 0], [1, 0]], [(2. / 3, 2. / 3, 0), (2. / 3, 2. / 3, 0)], label='bimodal WJ1'))
self.bimodal.append(Mixture2D([[-3. / 2, 0], [3. / 2, 0]], [(1. / 4, 1, 0), (1. / 4, 1, 0)], label='bimodal WJ2'))
self.bimodal.append(Mixture2D([[-1, 1], [1, -1]], [(2. / 3, 2. / 3, 3. / 5), (2. / 3, 2. / 3, 3. / 5)], label='bimodal WJ3'))
self.bimodal.append(Mixture2D([[1, -1], [-1, 1]], [(2. / 3, 2. / 3, 7. / 10), (2. / 3, 2. / 3, 0)], label='bimodal WJ4'))
self.trimodal = []
self.trimodal.append(Mixture2D([[-6. / 5, 6. / 5], [6. / 5, -6. / 5], [0, 0]],
[(3. / 5, 3. / 5, 3. / 10), (3. / 5, 3. / 5, -3. / 5), (0.25, 0.25, 0.2)], weights=[9, 9, 2], label='trimodal WJ1'))
self.trimodal.append(Mixture2D([[-6. / 5, 0], [6. / 5, 0], [0, 0]],
[(3. / 5, 3. / 5, 0.7), (3. / 5, 3. / 5, 0.7), (0.25, 0.25, -0.7)], label='trimodal WJ2'))
self.trimodal.append(Mixture2D([[-1, 0], [1, 2 * np.sqrt(3) / 3], [1, -2 * np.sqrt(3) / 3]],
[(0.6, 0.7, 0.6), (0.6, 0.7, 0), (0.4, 0.7, 0)], weights=[3, 3, 1], label='trimodal WJ3'))
self.quadrimodal = []
self.quadrimodal.append(Mixture2D([[-1, 1], [-1, -1], [1, -1], [1, 1]],
[(2. / 3, 2. / 3, 2. / 5), (2. / 3, 2. / 3, 3. / 5), (2. / 3, 2. / 3, -0.7), (2. / 3, 2. / 3, -0.5)],
weights=[1, 3, 1, 3], label='quadrimodal'))
self.all = self.shape_set + self.bimodal + self.trimodal + self.quadrimodal + self.cut_gaussians
def cutGaussians(self, cov, cut_x=[-2, -1, -0.5, 0, 1, 1.5, 2]):
return [Gaussian2D([0, 0], cov, xmin=cut, label=r'Gaussian [$x>%s$]' % cut) for cut in cut_x]
def distributions(self):
return self.all
def plot_compare_method(ax, prob, colors=['k'], sims=100, nsamp=default_nsamp,
scalings=[0.3, 0.5, 0.7, 0.9, 1, 1.1, 1.3, 1.5], test_settings=[None], linestyles=['-']):
# compare Parzen estimator with higher order
print(prob.label, ', size = ', nsamp)
if len(colors) == 1: colors = colors * len(scalings)
if len(linestyles) == 1: linestyles = linestyles * len(scalings)
miselist = np.empty((len(scalings), len(test_settings)))
for i, (settings, ls, color) in enumerate(zip(test_settings, linestyles, colors)):
if prob.dim == 1:
scales, MISEs = get1DMises(prob, nsamp=nsamp, scales=scalings, nsim=sims, settings=settings)
else:
scales, MISEs = get2DMises(prob, nsamp=nsamp, scales=scalings, nsim=sims, settings=settings)
ax.plot(scales, MISEs, ls=ls, color=color)
miselist[:, i] = MISEs
for i, scale in enumerate(scalings):
print(scale, miselist[i, :])
ax.set_yscale('log')
ax.set_xlim([scalings[0], scalings[-1]])
# ax.set_yticks(ax.get_yticks()[1:-1])
def plot_compare_probs_methods(ax, probs, colors=['b', 'r', 'k', 'm', 'c'], **kwargs):
for prob, col in zip(probs, colors):
plot_compare_method(ax, prob, col, **kwargs)
def compare_method_nsims(g, probs, sizes=[1000, 10000], **kwargs):
g.make_figure(len(sizes))
for i, size in enumerate(sizes):
ax = g._subplot_number(i)
plot_compare_probs_methods(ax, probs, nsamp=size, **kwargs)
def compare_method(probs, nx=2, fname='', **kwargs):
ny = (len(probs) - 1) // nx + 1
fig, axs = plt.subplots(ny, nx, sharex=True, sharey=True, squeeze=False , figsize=(nx * 3, ny * 3))
for i, prob in enumerate(probs):
ax = axs.reshape(-1)[i]
plot_compare_method(ax, prob, **kwargs)
ax.text(0.05, 0.06, prob.label, transform=ax.transAxes, horizontalalignment='left')
ax.axvline(1, color='gray', ls='--', alpha=0.5)
if prob.dim == 2:
if kwargs.get('nsamp') > 15000:
ax.set_ylim(6e-6, 8e-3)
elif kwargs.get('nsamp') > 5000:
ax.set_ylim(2e-4, 5e-2)
else:
if kwargs.get('nsamp') > 15000:
ax.set_ylim(6e-6, 8e-4)
elif kwargs.get('nsamp') > 5000:
ax.set_ylim(4e-5, 6e-3)
plt.subplots_adjust()
plt.tight_layout(0, 0, 0)
if fname: fig.savefig(fname, bbox_inches='tight')
def join_subplots(ax_array):
for ax in ax_array.reshape(-1):
if ax is not None:
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.tight_layout(0, 0, 0)
def testProgram():
import time
import argparse
parser = argparse.ArgumentParser(description='make getdist test plots from test Gaussian mixture distributions')
parser.add_argument('--sims', type=int, default=100, help='Number of simulations per case')
parser.add_argument('--nsamp', type=int, default=10000, help='Number of (independent) samples per simulation')
parser.add_argument('--plots', nargs='*', default=['dists_1D', 'dists_2D'], help='names of plots to make')
parser.add_argument('--mbc', type=int, default=1, help='mult_bias_correction_order')
parser.add_argument('--bco', type=int, default=1, help='boundary_correction_order')
args = parser.parse_args()
test1D = Test1DDistributions()
test2D = Test2DDistributions()
test_settings = {'mult_bias_correction_order':args.mbc, 'boundary_correction_order':args.bco, 'smooth_scale_1D':-1, 'smooth_scale_2D':-1}
g = plots.getSubplotPlotter(subplot_size=2)
if 'ISE_1D' in args.plots:
compare_method(test1D.distributions(), nx=3,
test_settings=[ {'mult_bias_correction_order':1, 'boundary_correction_order':1},
{'mult_bias_correction_order':2, 'boundary_correction_order':1},
{'mult_bias_correction_order':0, 'boundary_correction_order':0},
{'mult_bias_correction_order':0, 'boundary_correction_order':1},
{'mult_bias_correction_order':0, 'boundary_correction_order':2},
], colors=['k', 'b', 'r', 'm', 'c', 'g'], linestyles=['-', '-', ':', '-.', '--'],
fname='compare_method_1d_N%s.pdf' % args.nsamp,
sims=args.sims, nsamp=args.nsamp
)
if 'ISE_2D' in args.plots:
compare_method(test2D.distributions(), nx=4,
test_settings=[ {'mult_bias_correction_order':1, 'boundary_correction_order':1},
{'mult_bias_correction_order':2, 'boundary_correction_order':1},
{'mult_bias_correction_order':0, 'boundary_correction_order':0},
{'mult_bias_correction_order':0, 'boundary_correction_order':1},
], colors=['k', 'b', 'r', 'm', 'c', 'g'], linestyles=['-', '-', ':', '-.', '--'],
fname='compare_method_2d_N%s.pdf' % args.nsamp,
sims=args.sims, nsamp=args.nsamp
)
if args.plots is None or 'dists_1D' in args.plots:
g.newPlot()
start = time.time()
compare1D(g, test1D.distributions(), nsamp=args.nsamp, settings=test_settings)
print('1D timing:', time.time() - start)
join_subplots(g.subplots)
plt.savefig('test_dists_1D_mbc%s_bco%s_N%s.pdf' % (args.mbc, args.bco, args.nsamp), bbox_inches='tight')
if args.plots is None or 'dists_2D' in args.plots:
g.newPlot()
start = time.time()
compare2D(g, test2D.distributions(), nsamp=args.nsamp, settings=test_settings)
print('2D timing:', time.time() - start)
join_subplots(g.subplots)
plt.savefig('test_dists_2D_mbc%s_bco%s_N%s.pdf' % (args.mbc, args.bco, args.nsamp), bbox_inches='tight')
plt.show()
if False:
print('testing 1D gaussian MISE...')
scales, MISEs = get1DMises(test1D.gauss)
for scale, MISE in zip(scales, MISEs):
print(scale, MISE, np.sqrt(MISE))
print('testing 2D gaussian MISE...')
scales, MISEs = get2DMises(test2D.gauss)
for scale, MISE in zip(scales, MISEs):
print(scale, MISE, np.sqrt(MISE))
if __name__ == "__main__":
testProgram()
|
ClaudioNahmad/Servicio-Social
|
Parametros/CosmoMC/CosmoMC-master/python/getdist_tests/test_distributions.py
|
Python
|
gpl-3.0
| 14,699
|
[
"Gaussian"
] |
98f2a7283428fa496eb91845a0d134117a316965a57f5ae060235aa994a300d1
|
#
# Author: Henrique Pereira Coutada Miranda
# Run a boron nitride ground-state calculation using Quantum ESPRESSO
#
from __future__ import print_function, division
import sys
from qepy import *
import argparse
from schedulerpy import *
kpoints = [12,12,1]
kpoints_double = [24,24,1]
qpoints = [3,3,1]
layer_separation = 12
pw = 'pw.x'
q2r = 'q2r.x'
matdyn = 'matdyn.x'
prefix = 'bn'
npoints = 10
p = Path([ [[0.0, 0.0, 0.0],'G'],
[[0.5, 0.0, 0.0],'M'],
[[1./3,1./3,0.0],'K'],
[[0.0, 0.0, 0.0],'G']], [int(npoints*2),int(npoints),int(sqrt(5)*npoints)])
# scheduler
scheduler = Scheduler.factory
# create the input files
def get_inputfile():
""" Define a Quantum espresso input file for boron nitride
"""
qe = PwIn()
qe.atoms = [['N',[0.0,0.0,0.5]],
['B',[1/3,2/3,0.5]]]
qe.atypes = {'B': [10.811, "B.pbe-mt_fhi.UPF"],
'N': [14.0067,"N.pbe-mt_fhi.UPF"]}
qe.control['prefix'] = "'%s'"%prefix
qe.control['verbosity'] = "'high'"
qe.control['wf_collect'] = '.true.'
qe.control['pseudo_dir'] = "'../pseudos/'"
qe.system['celldm(1)'] = 4.7
qe.system['celldm(3)'] = layer_separation/qe.system['celldm(1)']
qe.system['ecutwfc'] = 60
qe.system['occupations'] = "'fixed'"
qe.system['nat'] = 2
qe.system['ntyp'] = 2
qe.system['ibrav'] = 4
qe.kpoints = [9, 9, 1]
qe.electrons['conv_thr'] = 1e-10
return qe
#relax
def relax():
if not os.path.isdir('relax'):
os.mkdir('relax')
qe = get_inputfile()
qe.control['calculation'] = "'vc-relax'"
qe.ions['ion_dynamics'] = "'bfgs'"
qe.cell['cell_dynamics'] = "'bfgs'"
qe.cell['cell_dofree'] = "'2Dxy'"
qe.write('relax/%s.scf'%prefix)
#scf
def scf(folder='scf'):
if not os.path.isdir(folder):
os.mkdir(folder)
qe = get_inputfile()
qe.control['calculation'] = "'scf'"
qe.write('%s/%s.scf'%(folder,prefix))
#nscf
def nscf(kpoints,folder='nscf'):
if not os.path.isdir(folder):
os.mkdir(folder)
qe = get_inputfile()
qe.control['calculation'] = "'nscf'"
qe.electrons['diago_full_acc'] = ".true."
qe.electrons['conv_thr'] = 1e-8
qe.system['nbnd'] = 60
qe.system['force_symmorphic'] = ".true."
qe.kpoints = kpoints
qe.write('%s/%s.nscf'%(folder,prefix))
#bands
def bands():
if not os.path.isdir('bands'):
os.mkdir('bands')
qe = get_inputfile()
qe.control['calculation'] = "'bands'"
qe.electrons['diago_full_acc'] = ".true."
qe.electrons['conv_thr'] = 1e-6
qe.system['nbnd'] = 6
qe.system['force_symmorphic'] = ".true."
qe.ktype = 'crystal'
qe.set_path(p)
qe.write('bands/%s.bands'%prefix)
def phonon(kpoints,qpoints,folder='phonon'):
if not os.path.isdir(folder):
os.mkdir(folder)
ph = PhIn()
ph['nq1'],ph['nq2'],ph['nq3'] = qpoints
ph['tr2_ph'] = 1e-8
ph['prefix'] = "'%s'"%prefix
ph['epsil'] = ".false."
ph['trans'] = ".true."
ph['fildyn'] = "'%s.dyn'"%prefix
ph['fildrho'] = "'%s.drho'"%prefix
ph['ldisp'] = ".true."
ph.write('%s/%s.ph'%(folder,prefix))
md = DynmatIn()
md['asr'] = "'simple'"
md['fildyn'] = "'%s.dyn1'"%prefix
md['filout'] = "'%s.modes'"%prefix
md.write('%s/%s.dynmat'%(folder,prefix))
def update_positions(pathin,pathout):
""" update the positions of the atoms in the scf file using the output of the relaxation loop
"""
e = PwXML(prefix,path=pathin)
pos = e.get_scaled_positions()
#open relaxed cell
qin = PwIn('%s/%s.scf'%(pathin,prefix))
#open scf file
qout = PwIn('%s/%s.scf'%(pathout,prefix))
#update positions on scf file
print("old celldm(1)", qin.system['celldm(1)'])
qout.system['celldm(1)'] = e.cell[0][0]
print("new celldm(1)", qout.system['celldm(1)'])
qout.atoms = zip([a[0] for a in qin.atoms],pos)
#write scf
qout.write('%s/%s.scf'%(pathout,prefix))
def run_plot():
print("running plotting:")
xml = PwXML(prefix=prefix,path='bands')
xml.plot_eigen(p)
def run_bands(nthreads=1):
print("running bands:")
qe_run = scheduler()
qe_run.add_command("cp -r scf/%s.save bands/"%prefix)
qe_run.add_command("cd bands; mpirun -np %d %s -inp %s.bands -nk %d > bands.log"%(nthreads,pw,prefix,nthreads))
qe_run.run()
qe_run.clean()
print("done!")
if __name__ == "__main__":
#parse options
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-r' ,'--relax', action="store_true", help='Structural relaxation')
parser.add_argument('-s' ,'--scf', action="store_true", help='Self-consistent calculation')
parser.add_argument('-n' ,'--nscf', action="store_true", help='Non-self consistent calculation')
parser.add_argument('-n2','--nscf_double', action="store_true", help='Non-self consistent calculation for the double grid')
parser.add_argument('-b' ,'--bands', action="store_true", help='Calculate band-structure')
parser.add_argument('-p' ,'--phonon', action="store_true", help='Phonon calculation')
parser.add_argument('-d' ,'--dispersion', action="store_true", help='Phonon dispersion')
parser.add_argument('-t' ,'--nthreads', help='Number of threads', default=2 )
args = parser.parse_args()
nthreads = int(args.nthreads)
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
# create input files and folders
relax()
scf()
nscf(kpoints)
nscf(kpoints_double, folder='nscf_double')
bands()
phonon(kpoints,qpoints)
if args.relax:
print("running relax:")
qe_run = scheduler()
qe_run.add_command("cd relax; %s -inp %s.scf > relax.log"%(pw,prefix)) #relax
qe_run.run()
update_positions('relax','scf')
print("done!")
if args.scf:
print("running scf:")
qe_run = scheduler()
qe_run.add_command("cd scf; mpirun -np %d %s -inp %s.scf > scf.log"%(nthreads,pw,prefix)) #scf
qe_run.run()
print("done!")
if args.nscf:
print("running nscf:")
qe_run = scheduler()
qe_run.add_command("cp -r scf/%s.save nscf/"%prefix) #nscf
qe_run.add_command("cd nscf; mpirun -np %d %s -nk %d -inp %s.nscf > nscf.log"%(nthreads,pw,nthreads,prefix)) #nscf
qe_run.run()
print("done!")
if args.nscf_double:
print("running nscf_double:")
qe_run = scheduler()
qe_run.add_command("cp -r scf/%s.save nscf_double/"%prefix) #nscf
qe_run.add_command("cd nscf_double; mpirun -np %d %s -inp %s.nscf > nscf_double.log"%(nthreads,pw,prefix)) #nscf
qe_run.run()
print("done!")
if args.phonon:
print("running phonon:")
qe_run = scheduler()
qe_run.add_command("cp -r scf/%s.save phonon/"%prefix)
qe_run.add_command("cd phonon; mpirun -np %d ph.x -inp %s.ph > phonon.log"%(nthreads,prefix)) #phonon
qe_run.add_command("dynmat.x < %s.dynmat > dynmat.log"%prefix) #matdyn
qe_run.run()
print("done!")
if args.dispersion:
qe_run = scheduler()
#q2r
disp = DynmatIn()
disp['fildyn']= "'%s.dyn'" % prefix
disp['zasr'] = "'simple'"
disp['flfrc'] = "'%s.fc'" % prefix
disp.write('phonon/q2r.in')
qe_run.add_command('cd phonon; %s < q2r.in'%q2r)
#dynmat
dyn = DynmatIn()
dyn['flfrc'] = "'%s.fc'" % prefix
dyn['asr'] = "'simple'"
dyn['flfrq'] = "'%s.freq'" % prefix
dyn['q_in_cryst_coord'] = '.true.'
dyn.qpoints = p.get_klist()
dyn.write('phonon/matdyn.in')
qe_run.add_command('%s < matdyn.in'%matdyn)
qe_run.run()
# matdyn class to read and plot the frequencies
m = Matdyn(natoms=2,path=p,folder='phonon')
m.plot_eigen()
if args.bands:
run_bands(nthreads)
run_plot()
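# Hedged usage sketch: typical invocations of this script, following the
# argparse flags defined above:
#
#   python gs_bn.py -r        # structural relaxation
#   python gs_bn.py -s -n     # scf + nscf
#   python gs_bn.py -b -p -d  # bands, phonons, dispersion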
|
henriquemiranda/yambopy
|
tutorial/bn/gs_bn.py
|
Python
|
bsd-3-clause
| 8,018
|
[
"CRYSTAL",
"Quantum ESPRESSO"
] |
cfe54b6c2e354e9d1067b27bd5472aaf18ee872e2a1d785d79e9c9bbff263148
|
"""Simulating individual cycles, of different types."""
import numpy as np
from scipy.signal import sawtooth
from scipy.stats import norm
from neurodsp.sim.info import get_sim_func
from neurodsp.utils.data import compute_nsamples
from neurodsp.utils.checks import check_param_range, check_param_options
from neurodsp.utils.decorators import normalize
from neurodsp.sim.transients import sim_synaptic_kernel, sim_action_potential
###################################################################################################
###################################################################################################
def sim_cycle(n_seconds, fs, cycle_type, phase=0, **cycle_params):
"""Simulate a single cycle of a periodic pattern.
Parameters
----------
n_seconds : float
Length of cycle window in seconds.
This is NOT the period of the cycle, but the length of the returned array of the cycle.
fs : float
Sampling frequency of the cycle simulation.
cycle_type : str or callable
What type of cycle to simulate. String label options include:
* sine: a sine wave cycle
* asine: an asymmetric sine cycle
* sawtooth: a sawtooth cycle
* gaussian: a gaussian cycle
* skewed_gaussian: a skewed gaussian cycle
* exp: a cycle with exponential decay
* 2exp: a cycle with exponential rise and decay
* exp_cos: an exponential cosine cycle
* asym_harmonic: an asymmetric cycle made as a sum of harmonics
* ap: an action potential
phase : float or {'min', 'max'}, optional, default: 0
If non-zero, applies a phase shift by rotating the cycle.
If a float, the shift is defined as a relative proportion of cycle, between [0, 1].
        If 'min' or 'max', the cycle is shifted to start at its minimum or maximum.
**cycle_params
Keyword arguments for parameters of the cycle, all as float:
* sine: None
* asine: `rdsym`, rise-decay symmetry, from 0-1
* sawtooth: `width`, width of the rising ramp as a proportion of the total cycle
* gaussian: `std`, standard deviation of the gaussian kernel, in seconds
* skewed_gaussian: `center`, `std`, `alpha`, `height`
* exp: `tau_d`, decay time, in seconds
        * 2exp: `tau_r` & `tau_d`, rise and decay times, in seconds
        * exp_cos: `exp`, `scale`, `shift`
        * asym_harmonic: `phi`, the phase at each harmonic, and `n_harmonics`
* ap: `centers`, `stds`, `alphas`, `heights`
Returns
-------
cycle : 1d array
Simulated cycle.
Examples
--------
Simulate a half second sinusoid, corresponding to a 2 Hz cycle (frequency=1/n_seconds):
>>> cycle = sim_cycle(n_seconds=0.5, fs=500, cycle_type='sine')
Simulate a sawtooth cycle, corresponding to a 10 Hz cycle:
>>> cycle = sim_cycle(n_seconds=0.1, fs=500, cycle_type='sawtooth', width=0.3)
Notes
-----
    Any function defined in sim.cycles as `sim_label_cycle(n_seconds, fs, **params)`
    is accessible by this function. The `cycle_type` input must match the label.
"""
if isinstance(cycle_type, str):
cycle_func = get_sim_func('sim_' + cycle_type + '_cycle', modules=['cycles'])
else:
cycle_func = cycle_type
cycle = cycle_func(n_seconds, fs, **cycle_params)
cycle = phase_shift_cycle(cycle, phase)
return cycle
@normalize
def sim_normalized_cycle(n_seconds, fs, cycle_type, phase=0, **cycle_params):
return sim_cycle(n_seconds, fs, cycle_type, phase, **cycle_params)
sim_normalized_cycle.__doc__ = sim_cycle.__doc__
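# A minimal hedged sketch of the normalized variant (the @normalize decorator
# is assumed to expose `mean` and `variance` keyword arguments, per
# neurodsp.utils.decorators):
#
#   cycle = sim_normalized_cycle(0.5, 500, 'sine', mean=0., variance=1.)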
def sim_sine_cycle(n_seconds, fs):
"""Simulate a cycle of a sine wave.
Parameters
----------
n_seconds : float
Length of cycle window in seconds.
fs : float
Sampling frequency of the cycle simulation.
Returns
-------
cycle : 1d array
Simulated sine cycle.
Examples
--------
Simulate a cycle of a 1 Hz sine wave:
>>> cycle = sim_sine_cycle(n_seconds=1, fs=500)
"""
times = create_cycle_time(n_seconds, fs)
cycle = np.sin(times)
return cycle
def sim_asine_cycle(n_seconds, fs, rdsym, side='both'):
"""Simulate a cycle of an asymmetric sine wave.
Parameters
----------
n_seconds : float
Length of cycle window in seconds.
Note that this is NOT the period of the cycle, but the length of the returned array
that contains the cycle, which can be (and usually is) much shorter.
fs : float
Sampling frequency of the cycle simulation.
rdsym : float
Rise-decay symmetry of the cycle, as fraction of the period in the rise time, where:
= 0.5 - symmetric (sine wave)
< 0.5 - shorter rise, longer decay
> 0.5 - longer rise, shorter decay
side : {'both', 'peak', 'trough'}
Which side of the cycle to make asymmetric.
Returns
-------
cycle : 1d array
Simulated asymmetric cycle.
Examples
--------
Simulate a 2 Hz asymmetric sine cycle:
>>> cycle = sim_asine_cycle(n_seconds=0.5, fs=500, rdsym=0.75)
"""
check_param_range(rdsym, 'rdsym', [0., 1.])
check_param_options(side, 'side', ['both', 'peak', 'trough'])
# Determine number of samples
n_samples = compute_nsamples(n_seconds, fs)
half_sample = int(n_samples/2)
# Check for an odd number of samples (for half peaks, we need to fix this later)
remainder = n_samples % 2
# Calculate number of samples rising
n_rise = int(np.round(n_samples * rdsym))
n_rise1 = int(np.ceil(n_rise/2))
n_rise2 = int(np.floor(n_rise/2))
# Calculate number of samples decaying
n_decay = n_samples - n_rise
n_decay1 = half_sample - n_rise1
# Create phase definition for cycle with both extrema being asymmetric
if side == 'both':
phase = np.hstack([np.linspace(0, np.pi/2, n_rise1 + 1),
np.linspace(np.pi/2, -np.pi/2, n_decay + 1)[1:-1],
np.linspace(-np.pi/2, 0, n_rise2 + 1)[:-1]])
# Create phase definition for cycle with only one extrema being asymmetric
elif side == 'peak':
half_sample += 1 if bool(remainder) else 0
phase = np.hstack([np.linspace(0, np.pi/2, n_rise1 + 1),
np.linspace(np.pi/2, np.pi, n_decay1 + 1)[1:-1],
np.linspace(-np.pi, 0, half_sample + 1)[:-1]])
elif side == 'trough':
half_sample -= 1 if not bool(remainder) else 0
phase = np.hstack([np.linspace(0, np.pi, half_sample + 1)[:-1],
np.linspace(-np.pi, -np.pi/2, n_decay1 + 1),
np.linspace(-np.pi/2, 0, n_rise1 + 1)[:-1]])
# Convert phase definition to signal
cycle = np.sin(phase)
return cycle
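# Hedged sanity check: rdsym is the fraction of the period spent rising, and
# with side='both' the rise is split across the window edges, so the peak of
# a rdsym=0.25 cycle should land near 0.125 of the way through the window:
#
#   cycle = sim_asine_cycle(n_seconds=0.5, fs=500, rdsym=0.25)
#   peak_frac = np.argmax(cycle) / len(cycle)  # expected ~0.125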
def sim_sawtooth_cycle(n_seconds, fs, width):
"""Simulate a cycle of a sawtooth wave.
Parameters
----------
n_seconds : float
Length of cycle window in seconds.
fs : float
Sampling frequency of the cycle simulation.
width : float
Width of the rising ramp as a proportion of the total cycle.
Returns
-------
cycle : 1d array
Simulated sawtooth cycle.
Examples
--------
Simulate a symmetric cycle of a sawtooth wave:
>>> cycle = sim_sawtooth_cycle(n_seconds=0.25, fs=500, width=0.5)
"""
check_param_range(width, 'width', [0., 1.])
times = create_cycle_time(n_seconds, fs)
cycle = sawtooth(times, width)
return cycle
def sim_gaussian_cycle(n_seconds, fs, std, center=.5):
"""Simulate a cycle of a gaussian.
Parameters
----------
n_seconds : float
Length of cycle window in seconds.
fs : float
Sampling frequency of the cycle simulation.
std : float
Standard deviation of the gaussian kernel, in seconds.
center : float, optional, default: 0.5
The center of the gaussian.
Returns
-------
cycle : 1d array
Simulated gaussian cycle.
Examples
--------
Simulate a cycle of a gaussian wave:
>>> cycle = sim_gaussian_cycle(n_seconds=0.2, fs=500, std=0.025)
"""
xs = np.linspace(0, 1, compute_nsamples(n_seconds, fs))
cycle = np.exp(-(xs-center)**2 / (2*std**2))
return cycle
def sim_skewed_gaussian_cycle(n_seconds, fs, center, std, alpha, height=1):
"""Simulate a cycle of a skewed gaussian.
Parameters
----------
n_seconds : float
Length of cycle window in seconds.
fs : float
Sampling frequency of the cycle simulation.
center : float
The center of the skewed gaussian.
std : float
Standard deviation of the gaussian kernel, in seconds.
alpha : float
Magnitude and direction of the skew.
height : float, optional, default: 1.
Maximum value of the cycle.
Returns
-------
cycle : 1d array
Output values for skewed gaussian function.
"""
n_samples = compute_nsamples(n_seconds, fs)
# Gaussian distribution
cycle = sim_gaussian_cycle(n_seconds, fs, std, center)
    # Skewed cumulative distribution function.
    # Assumes times are centered around 0; adjust to center around a non-zero value.
times = np.linspace(-1, 1, n_samples)
cdf = norm.cdf(alpha * ((times - ((center * 2) - 1)) / std))
# Skew the gaussian
cycle = cycle * cdf
# Rescale height
cycle = (cycle / np.max(cycle)) * height
return cycle
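# Hedged example: a right-skewed gaussian cycle; positive alpha skews mass
# toward later times, and `height` sets the maximum value:
#
#   cycle = sim_skewed_gaussian_cycle(n_seconds=0.2, fs=500,
#                                     center=0.5, std=0.05, alpha=2)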
def sim_exp_cos_cycle(n_seconds, fs, exp, scale=2, shift=1):
"""Simulate an exponential cosine cycle.
Parameters
----------
n_seconds : float
Length of cycle window in seconds.
fs : float
Sampling frequency of the cycle simulation.
exp : float
Exponent controlling the amplitude asymmetry of peaks relative to the troughs.
- `exp=0` : zeros
- `exp=.5`: wide peaks, narrow troughs
- `exp=1.`: symmetrical peaks and troughs (sine wave)
- `exp=5.`: wide troughs, narrow peaks
scale : float, optional, default: 2
Rescales the amplitude of the signal.
shift : float, optional, default: 1
Translate the signal along the y-axis.
Returns
-------
cycle : 1d array
Simulated exponential cosine cycle.
Notes
-----
- This exponential cosine cycle is implemented as Equation 9 of [1]_.
    .. math::
        cycle = ((\cos(2 \pi f t) + 1) / 2)^{exp}
References
----------
.. [1] Lozano-Soldevilla, D., Huurne, N. T., & Oostenveld, R. (2016). Neuronal
Oscillations with Non-sinusoidal Morphology Produce Spurious Phase-to-Amplitude
Coupling and Directionality. Frontiers in Computational Neuroscience, 10.
DOI: https://doi.org/10.3389/fncom.2016.00087
Examples
--------
Simulate a cycle of an exponential cosine wave:
>>> cycle = sim_exp_cos_cycle(1, 500, exp=2)
"""
check_param_range(exp, 'exp', [0., np.inf])
times = create_cycle_time(n_seconds, fs)
cycle = ((-np.cos(times) + shift) / scale)**exp
return cycle
def sim_asym_harmonic_cycle(n_seconds, fs, phi, n_harmonics):
"""Simulate an asymmetrical cycle as a sum of harmonics.
Parameters
----------
n_seconds : float
Length of cycle window in seconds.
fs : float
Sampling frequency of the cycle simulation.
phi : float
Phase at each harmonic.
n_harmonics : int
Number of harmonics to sum across.
Returns
-------
cycle : 1d array
Simulated asymmetrical harmonic cycle.
Notes
-----
- This asymmetric cycle is implemented as Equation 10 of [1]_.
    .. math::
        cycle = \sum_{j=1}^{n} \frac{1}{j^2} \cdot \cos(j 2 \pi f t + (j - 1) \phi)
References
----------
.. [1] Lozano-Soldevilla, D., Huurne, N. T., & Oostenveld, R. (2016). Neuronal
Oscillations with Non-sinusoidal Morphology Produce Spurious Phase-to-Amplitude
Coupling and Directionality. Frontiers in Computational Neuroscience, 10.
DOI: https://doi.org/10.3389/fncom.2016.00087
Examples
--------
Simulate an asymmetrical cycle as the sum of harmonics:
>>> cycle = sim_asym_harmonic_cycle(1, 500, phi=1, n_harmonics=1)
"""
times = create_cycle_time(n_seconds, fs)
cycs = np.zeros((n_harmonics+1, len(times)))
harmonics = np.array(range(1, n_harmonics + 2))
for idx, jth in enumerate(harmonics):
cycs[idx] = (1 / jth**2) * np.cos(jth*times+(jth-1)*phi)
cycle = np.sum(cycs, axis=0)
return cycle
# Alias action potential from `sim_action_potential`
def sim_ap_cycle(n_seconds, fs, centers, stds, alphas, heights):
return sim_action_potential(n_seconds, fs, centers, stds, alphas, heights)
sim_ap_cycle.__doc__ = sim_action_potential.__doc__
# Alias single exponential cycle from `sim_synaptic_kernel`
def sim_exp_cycle(n_seconds, fs, tau_d):
return sim_synaptic_kernel(n_seconds, fs, tau_r=0, tau_d=tau_d)
sim_exp_cycle.__doc__ = sim_synaptic_kernel.__doc__
# Alias double exponential cycle from `sim_synaptic_kernel`
def sim_2exp_cycle(n_seconds, fs, tau_r, tau_d):
return sim_synaptic_kernel(n_seconds, fs, tau_r=tau_r, tau_d=tau_d)
sim_2exp_cycle.__doc__ = sim_synaptic_kernel.__doc__
def create_cycle_time(n_seconds, fs):
"""Create a vector of time indices, in radians, for a single cycle.
Parameters
----------
n_seconds : float
Length of simulated kernel in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
Returns
-------
1d array
Time indices.
Examples
--------
Create time indices, in radians, for a single cycle:
>>> indices = create_cycle_time(n_seconds=1, fs=500)
"""
return 2 * np.pi * 1 / n_seconds * (np.arange(n_seconds * fs) / fs)
def phase_shift_cycle(cycle, shift):
"""Phase shift a simulated cycle time series.
Parameters
----------
cycle : 1d array
Cycle values to apply a rotation shift to.
shift : float or {'min', 'max'}
If non-zero, applies a phase shift by rotating the cycle.
If a float, the shift is defined as a relative proportion of cycle, between [0, 1].
        If 'min' or 'max', the cycle is shifted to start at its minimum or maximum.
Returns
-------
cycle : 1d array
Rotated cycle.
Examples
--------
Phase shift a simulated sine wave cycle:
>>> cycle = sim_cycle(n_seconds=0.5, fs=500, cycle_type='sine')
>>> shifted_cycle = phase_shift_cycle(cycle, shift=0.5)
"""
if isinstance(shift, (float, int)):
check_param_range(shift, 'shift', [0., 1.])
else:
check_param_options(shift, 'shift', ['min', 'max'])
if shift == 'min':
shift = np.argmin(cycle)
elif shift == 'max':
shift = np.argmax(cycle)
else:
shift = int(np.round(shift * len(cycle)))
indices = range(shift, shift+len(cycle))
cycle = cycle.take(indices, mode='wrap')
return cycle
|
voytekresearch/neurodsp
|
neurodsp/sim/cycles.py
|
Python
|
apache-2.0
| 15,139
|
[
"Gaussian"
] |
3c7604c1e3a9926c22e5eff46eca63cd9f7597547eb1e6e16405bc9129d56e55
|
#!/usr/bin/python
#
# Copyright 2010 Brian Dolbec <brian.dolbec@gmail.com>
# Copyright(c) 2010, Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
#
"""Provides support functions to enalyze modules"""
from gentoolkit import errors
from gentoolkit.keyword import reduce_keywords
from gentoolkit.flag import (reduce_flags, get_flags, get_all_cpv_use,
filter_flags, get_installed_use, defaulted_flags)
#from gentoolkit.package import Package
import portage
class FlagAnalyzer(object):
"""Specialty functions for analysing an installed package's
    USE flags. Can be used for single or multiple use without
needing to be reset unless the system USE flags are changed.
@type system: list or set
@param system: the default system USE flags.
@type _get_flags: function
    @param _get_flags: Normally defaulted, can be overridden for testing
    @type _get_used: function
    @param _get_used: Normally defaulted, can be overridden for testing
"""
def __init__(self,
system,
filter_defaults=False,
target="USE",
_get_flags=get_flags,
_get_used=get_installed_use
):
self.get_flags = _get_flags
self.get_used = _get_used
self.filter_defaults = filter_defaults
self.target = target
self.reset(system)
def reset(self, system):
"""Resets the internal system USE flags and use_expand variables
to the new setting. The use_expand variable is handled internally.
@type system: list or set
@param system: the default system USE flags.
"""
self.system = set(system)
self.use_expand = portage.settings['USE_EXPAND'].lower().split()
def analyse_cpv(self, cpv):
"""Gets all relavent USE flag info for a cpv and breaks them down
into 3 sets, plus (package.use enabled), minus ( package.use disabled),
unset.
@param cpv: string. 'cat/pkg-ver'
@rtype tuple of sets
@return (plus, minus, unset) sets of USE flags
"""
installed = set(self.get_used(cpv, self.target))
_iuse = self.get_flags(cpv)
iuse = set(reduce_flags(_iuse))
iuse_defaults = defaulted_flags(_iuse)
return self._analyse(installed, iuse, iuse_defaults)
def _analyse(self, installed, iuse, iuse_defaults):
"""Analyzes the supplied info and returns the flag settings
that differ from the defaults
@type installed: set
@param installed: the installed with use flags
@type iuse: set
@param iuse: the current ebuilds IUSE
"""
defaults = self.system.intersection(iuse)
# update defaults with iuse_defaults
defaults.update(iuse_defaults['+'])
defaults = defaults.difference(iuse_defaults['-'])
usedflags = iuse.intersection(set(installed))
if self.filter_defaults:
plus = usedflags.difference(defaults)
else:
plus = usedflags
minus = defaults.difference(usedflags)
unset = iuse.difference(defaults, plus, minus)
cleaned_unset = self.remove_expanding(unset)
return (plus, minus, cleaned_unset)
def analyse_pkg(self, pkg):
"""Gets all relevent USE flag info for a pkg and breaks them down
into 3 sets, plus (package.use enabled), minus ( package.use disabled),
unset.
@param pkg: gentoolkit.package.Package object
@rtype tuple of sets
@return (plus, minus, unset) sets of USE flags
"""
installed = set(self.pkg_used(pkg))
#print("installed =", installed)
_iuse = self.pkg_flags(pkg)
iuse = set(reduce_flags(_iuse))
iuse_defaults = defaulted_flags(_iuse)
#print("iuse =", iuse)
return self._analyse(installed, iuse, iuse_defaults)
def pkg_used(self, pkg):
if self.target == "USE":
return pkg.use().split()
return pkg.environment(self.target).split()
def pkg_flags(self, pkg):
final_use, use_expand_hidden, usemasked, useforced = \
get_all_cpv_use(pkg.cpv)
flags = pkg.environment("IUSE", prefer_vdb=False).split()
return filter_flags(flags, use_expand_hidden, usemasked, useforced)
def redundant(self, cpv, iuse):
"""Checks for redundant settings.
future function. Not yet implemented.
"""
pass
def remove_expanding(self, flags):
"""Remove unwanted USE_EXPAND flags
from unset IUSE sets
@param flags: short list or set of USE flags
@rtype set
@return USE flags
"""
_flags = set(flags)
for expander in self.use_expand:
for flag in flags:
if expander in flag:
_flags.remove(flag)
if not _flags:
break
return _flags
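# Hedged usage sketch (assumes a configured portage host and that the cpv is
# actually installed; the cpv below is illustrative only):
#
#   system_use = portage.settings["USE"].split()
#   analyzer = FlagAnalyzer(system_use)
#   plus, minus, unset = analyzer.analyse_cpv("app-editors/vim-8.2.4586")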
class KeywordAnalyser(object):
"""Specialty functions for analysing the installed package db for
    keyword usage and the packages that used them.
Note: should be initialized with the internal set_order() before use.
See internal set_order() for more details.
This class of functions can be used for single cpv checks or
used repeatedly for an entire package db.
@type arch: string
@param arch: the system ARCH setting
@type accept_keywords: list
@param accept_keywords: eg. ['x86', '~x86']
    @type vardb: vardb class of functions, defaults to: portage.db[portage.root]["vartree"].dbapi
@param vardb: vardb class of functions, needed=aux_get()
to return => KEYWORDS & USE flags for a cpv
= aux_get(cpv, ["KEYWORDS", "USE"])
"""
# parsing order to determine appropriate keyword used for installation
normal_order = ['stable', 'testing', 'prefix', 'testing_prefix', 'missing']
prefix_order = ['prefix', 'testing_prefix', 'stable', 'testing', 'missing']
parse_range = list(range(len(normal_order)))
def __init__(self, arch, accept_keywords, vardb=portage.db[portage.root]["vartree"].dbapi):
self.arch = arch
self.accept_keywords = accept_keywords
self.vardb = vardb
self.prefix = ''
self.parse_order = None
self.check_key = {
'stable': self._stable,
'testing': self._testing,
'prefix': self._prefix,
'testing_prefix': self._testing_prefix,
'missing': self._missing
}
self.mismatched = []
def determine_keyword(self, keywords, used, cpv):
"""Determine the keyword from the installed USE flags and
the KEYWORDS that was used to install a package.
@param keywords: list of keywords available to install a pkg
        @param used: list of USE flags recorded for the installed pkg
@rtype: string
@return a keyword or null string
"""
used = set(used)
kwd = None
result = ''
if keywords:
absolute_kwds = reduce_keywords(keywords)
kwd = list(used.intersection(absolute_kwds))
#if keywords == ['~ppc64']:
#print "Checked keywords for kwd", keywords, used, "kwd =", kwd
if not kwd:
#print "Checking for kwd against portage.archlist"
absolute_kwds = reduce_keywords(keywords)
# check for one against archlist then re-check
kwd = list(absolute_kwds.intersection(portage.archlist))
#print "determined keyword =", kwd
if len(kwd) == 1:
key = kwd[0]
#print "determined keyword =", key
elif not kwd:
#print "kwd != 1", kwd, cpv
result = self._missing(self.keyword, keywords)
            else: # too many, try to narrow them down
#print "too many kwd's, trying to match against arch"
_kwd = list(set(kwd).intersection(self.arch))
key = ''
if _kwd:
#print "found one! :)", _kwd
                    key = _kwd[0]
else: # try re-running the short list against archlist
#print "Checking kwd for _kwd against portage.archlist"
_kwd = list(set(kwd).intersection(portage.archlist))
if _kwd and len(_kwd) == 1:
#print "found one! :)", _kwd
key = _kwd[0]
else:
#print " :( didn't work, _kwd =", _kwd, "giving up on:", cpv
result = self._missing(self.keyword, keywords)
i = 0
while not result and i in self.parse_range:
parsekey = self.parse_order[i]
result = self.check_key[parsekey](key, keywords)
i += 1
return result
def _stable(self, key, keywords):
"""test for a normal stable keyword"""
if key in keywords:
return key
return ''
def _testing(self, key, keywords):
"""test for a normal testing keyword"""
if ("~" + key) in keywords:
return "~" + key
return ''
def _prefix(self, key, keywords):
"""test for a stable prefix keyword"""
if not self.prefix:
return ''
_key = '-'.join([key, self.prefix])
if _key in keywords:
#print key, "is in", keywords
return _key
return ''
def _testing_prefix(self, key, keywords):
"""test for a testing prefix keyword"""
if not self.prefix:
return ''
_key = "~" +'-'.join([key, self.prefix])
if _key in keywords:
#print key, "is in", keywords
return _key
return ''
def _missing(self, key, keywords):
"""generates a missing keyword to return"""
if self.prefix and key != self.keyword:
_key = '-'.join([key, self.prefix])
else:
_key = '-' + key
#print "_missisng :( _key =", _key
return _key
def get_inst_keyword_cpv(self, cpv):
"""Determines the installed with keyword for cpv
@type cpv: string
@param cpv: an installed CAT/PKG-VER
@rtype: string
@returns a keyword determined to have been used to install cpv
"""
keywords, used = self.vardb.aux_get(cpv, ["KEYWORDS", "USE"])
keywords = keywords.split()
used = used.split()
return self._parse(keywords, used, cpv=cpv)
def get_inst_keyword_pkg(self, pkg):
"""Determines the installed with keyword for cpv
@param pkg: gentoolkit.package.Package object
@rtype: string
@returns a keyword determined to have been used to install cpv
"""
keywords, used = pkg.environment(["KEYWORDS", "USE"],
prefer_vdb=True, fallback=False)
keywords = keywords.split()
used = used.split()
return self._parse(keywords, used, pkg=pkg)
def _parse(self, keywords, used, pkg=None, cpv=None):
if pkg:
_cpv = pkg.cpv
else:
_cpv = cpv
if not self.parse_order:
self.set_order(used)
keyword = self.keyword
# sanity check
if self.arch not in used:
#print "Found a mismatch = ", cpv, self.arch, used
self.mismatched.append(_cpv)
if keyword in keywords:
#print "keyword", keyword, "is in", keywords
return keyword
elif "~"+keyword in keywords:
#print "~keyword", keyword, "is in", keywords
return "~"+keyword
else:
keyword = self.determine_keyword(keywords, used, _cpv)
if not keyword:
raise errors.GentoolkitUnknownKeyword(_cpv, ' '.join(keywords), used)
return keyword
def set_order(self, used):
"""Used to set the parsing order to determine a keyword
used for installation.
        This is needed due to the way prefix arches and keywords
        work with portage. It looks for the 'prefix' flag. A positive result
sets it to the prefix order and keyword.
@type used: list
@param used: a list of pkg USE flags or the system USE flags"""
if 'prefix' in used:
#print "SET_ORDER() Setting parse order to prefix"
prefix = None
self.parse_order = self.prefix_order
for key in self.accept_keywords:
#print "SET_ORDER() '"+key+"'"
if '-' in key:
#print "SET_ORDER()found prefix keyword :", key
if self.arch in key:
prefix = key.split('-')[1]
#print "prefix =", prefix
self.prefix = prefix
self.keyword = '-'.join([self.arch, prefix])
else:
#print "SET_ORDER() Setting parse order to normal"
self.parse_order = self.normal_order
self.keyword = self.arch
#print "SET_ORDER() completed: prefix =", self.prefix, ", keyword =", \
# self.keyword, "parse order =",self.parse_order
#print
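# Hedged usage sketch (the class notes above say set_order() must run before
# use; the arch, keywords and cpv below are illustrative only):
#
#   ka = KeywordAnalyser('amd64', ['amd64', '~amd64'])
#   ka.set_order(portage.settings["USE"].split())
#   print(ka.get_inst_keyword_cpv('sys-apps/portage-3.0.30-r1'))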
|
zmedico/gentoolkit
|
pym/gentoolkit/enalyze/lib.py
|
Python
|
gpl-2.0
| 11,122
|
[
"Brian"
] |
142283d0f69789ac0254b53e8b2a86a47054f2319c32f5228efea43973bcc9d6
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
import warnings
import gtk
from kiwi.ui.objectlist import ObjectList, Column
from stoqlib.api import api
from stoqlib.gui.base.dialogs import BasicDialog
from stoqlib.gui.utils.keybindings import (get_bindings,
set_user_binding,
remove_user_binding,
remove_user_bindings,
get_binding_categories)
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
# Gtk+ bug 639455 fixed in 2011-01-04 in 3.x branch
warnings.filterwarnings(
action='ignore', module='stoqlib.gui.base.dialogs',
message=("Object class GtkCellEditableEventBox doesn't implement "
"property 'editing-canceled' from interface 'GtkCellEditable'"))
class ShortcutColumn(Column):
def __init__(self, attribute, title, editor, **kwargs):
Column.__init__(self, attribute=attribute, title=title,
data_type=str, **kwargs)
self.editor = editor
def create_renderer(self, model):
renderer = gtk.CellRendererAccel()
renderer.props.editable = True
renderer.props.accel_mode = gtk.CELL_RENDERER_ACCEL_MODE_OTHER
renderer.connect('accel-edited', self._on_accel_edited)
renderer.connect('accel-cleared', self._on_accel_cleared)
return renderer, 'text'
def _on_accel_edited(self, renderer, path, accel_key, mods, keycode):
model = self._objectlist.get_model()
binding = model[path][0]
binding.shortcut = gtk.accelerator_name(accel_key, mods)
self.editor.set_binding(binding)
def _on_accel_cleared(self, renderer, path):
model = self._objectlist.get_model()
binding = model[path][0]
self.editor.remove_binding(binding)
binding.shortcut = None
class ShortcutsEditor(BasicDialog):
size = (700, 400)
title = _("Keyboard shortcuts")
def __init__(self):
BasicDialog.__init__(self, size=ShortcutsEditor.size,
title=ShortcutsEditor.title)
self._create_ui()
def _create_ui(self):
self.cancel_button.hide()
hbox = gtk.HBox(spacing=6)
self.main.remove(self.main.get_child())
self.main.add(hbox)
hbox.show()
self.categories = ObjectList(
[Column('label', sorted=True, expand=True)],
get_binding_categories(),
gtk.SELECTION_BROWSE)
self.categories.connect('selection-changed',
self._on_categories__selection_changed)
self.categories.set_headers_visible(False)
self.categories.set_size_request(200, -1)
hbox.pack_start(self.categories, False, False)
self.categories.show()
box = gtk.VBox(spacing=6)
hbox.pack_start(box)
box.show()
self.shortcuts = ObjectList(self._get_columns(), [],
gtk.SELECTION_BROWSE)
box.pack_start(self.shortcuts)
self.shortcuts.show()
self._label = gtk.Label(
_("You need to restart Stoq for the changes to take effect"))
box.pack_start(self._label, False, False, 6)
box.show()
defaults_button = gtk.Button(_("Reset defaults"))
defaults_button.connect('clicked', self._on_defaults_button__clicked)
self.action_area.pack_start(defaults_button, False, False, 6)
self.action_area.reorder_child(defaults_button, 0)
defaults_button.show()
def _on_categories__selection_changed(self, categories, category):
if not category:
return
self.shortcuts.add_list(get_bindings(category.name), clear=True)
def _on_defaults_button__clicked(self, button):
old = self.categories.get_selected()
api.user_settings.remove('shortcuts')
remove_user_bindings()
self._label.show()
self.categories.refresh()
self.categories.select(old)
def _get_columns(self):
return [Column('description', _("Description"), data_type=str,
expand=True, sorted=True),
ShortcutColumn('shortcut', _("Shortcut"), self)]
def set_binding(self, binding):
set_user_binding(binding.name, binding.shortcut)
d = api.user_settings.get('shortcuts', {})
d[binding.name] = binding.shortcut
self._label.show()
def remove_binding(self, binding):
remove_user_binding(binding.name)
d = api.user_settings.get('shortcuts', {})
try:
del d[binding.name]
except KeyError:
pass
self._label.show()
|
andrebellafronte/stoq
|
stoqlib/gui/editors/shortcutseditor.py
|
Python
|
gpl-2.0
| 5,622
|
[
"VisIt"
] |
9c216302c45201eada3b3d34aaf071ecf6d4e8a4aa3f5001e529d5d5a9a4d0a0
|
#
# Copyright 2014-2015, 2017, 2020-2021 Lars Pastewka (U. Freiburg)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2014) James Kermode, King's College London
# Lars Pastewka, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
import unittest
import numpy as np
import ase
import ase.lattice.hexagonal
import matscipytest
from matscipy.fracture_mechanics.idealbrittlesolid import (
find_triangles_2d,
IdealBrittleSolid,
triangular_lattice_slab,
)
from matscipy.numerical import numerical_forces, numerical_stress
###
class TestNeighbours(matscipytest.MatSciPyTestCase):
tol = 1e-6
def test_forces_and_virial(self):
a = triangular_lattice_slab(1.0, 2, 2)
calc = IdealBrittleSolid(rc=1.2, beta=0.0)
a.set_calculator(calc)
a.rattle(0.1)
f = a.get_forces()
fn = numerical_forces(a)
self.assertArrayAlmostEqual(f, fn, tol=self.tol)
self.assertArrayAlmostEqual(a.get_stress(),
numerical_stress(a),
tol=self.tol)
def test_forces_linear(self):
a = triangular_lattice_slab(1.0, 1, 1)
calc = IdealBrittleSolid(rc=1.2, beta=0.0, linear=True)
calc.set_reference_crystal(a)
a.set_calculator(calc)
a.rattle(0.01)
f = a.get_forces()
fn = numerical_forces(a)
self.assertArrayAlmostEqual(f, fn, tol=self.tol)
def test_two_triangles(self):
a = ase.Atoms('4Xe', [[0,0,0], [1,0,0], [1,1,0], [0,1,0]])
a.center(vacuum=10)
c1, c2, c3 = find_triangles_2d(a, 1.1)
self.assertArrayAlmostEqual(np.transpose([c1, c2, c3]), [[0,1,2], [0,1,3], [0,2,3], [1,2,3]])
###
if __name__ == '__main__':
unittest.main()
|
libAtoms/matscipy
|
tests/test_idealbrittlesolid.py
|
Python
|
lgpl-2.1
| 3,378
|
[
"ASE",
"Matscipy"
] |
768e92f51bb502f9b4a9865c93cb718a67bef06541efd7ca0a226b16f4e07c9f
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.traces.tracetypes.trace import Trace
import numpy as np
import itertools
class PieceWiseComponentVisitor(object):
@classmethod
def visit(cls, o, **kwargs):
return o.accept_visitor(cls, **kwargs)
@classmethod
def visit_linear(cls, o, **kwargs):
raise NotImplementedError()
@classmethod
def visit_flat(cls, o, **kwargs):
raise NotImplementedError()
class TracePieceFunction(object):
def __init__(self, time_window):
self.time_window = time_window
def get_min_time(self):
return self.time_window[0]
def get_max_time(self):
return self.time_window[1]
def get_duration(self):
return self.get_max_time() - self.get_min_time()
def get_start_value(self):
raise NotImplementedError()
def get_end_value(self):
raise NotImplementedError()
# To allow for manipulation:
def accept_visitor(self):
raise NotImplementedError()
class TracePieceFunctionLinear(TracePieceFunction):
def __init__(self, time_window, x0=None, x1=None):
assert x0 is not None and x1 is not None
super(TracePieceFunctionLinear, self).__init__(time_window=time_window)
self.x0 = x0
self.x1 = x1
def accept_visitor(self, visitor, **kwargs):
return visitor.visit_linear(self, **kwargs)
def get_start_value(self):
return self.x0
def get_end_value(self):
return self.x1
def get_values(self, times):
t_scaled = ( (times - self.get_min_time()) / self.get_duration() )
x_scaled = self.get_start_value() + (self.x1 - self.x0) *t_scaled
return x_scaled
class TracePieceFunctionFlat(TracePieceFunction):
def __init__(self, time_window, x=None):
super(TracePieceFunctionFlat, self).__init__(time_window=time_window)
assert x is not None
self.x = x
def get_value(self):
return self.x
def get_values(self, times):
return np.ones(len(times)) * self.x
def get_start_value(self):
return self.x
def get_end_value(self):
return self.x
def accept_visitor(self, visitor, **kwargs):
return visitor.visit_flat(self, **kwargs)
class TracePiecewise(Trace):
def __init__(self, pieces, name=None, comment=None, tags=None):
super(TracePiecewise, self).__init__(name=name, comment=comment, tags=tags)
self._pieces = pieces
# Check we link up:
for i in range(len(pieces) - 1):
i_stop = self._pieces[i].get_max_time()
i_next_start = self._pieces[i + 1].get_min_time()
dist = i_stop - i_next_start
assert np.fabs(dist.rescale('ms').magnitude) < 0.001
@property
def pieces(self):
return self._pieces
def get_min_time(self):
return self._pieces[0].get_min_time()
def get_max_time(self):
return self._pieces[-1].get_max_time()
def n_pieces(self):
return len(self._pieces)
def n_pieces_longer_than(self, t):
return len([piece for piece in self._pieces if piece.get_duration() > t])
def get_values(self, times):
_datas = []
_times = []
assert (times <= self.get_max_time()).all()
assert (times >= self.get_min_time()).all()
        # mask of time points not yet consumed by an earlier piece (all True initially)
        done_times = np.ones(len(times)) > 0.0
for piece in self._pieces:
ind1 = (times.rescale('ms') < float(piece.get_max_time().rescale('ms').magnitude))
ind = np.logical_and(ind1, done_times)
ind_locs = np.where(ind)
_time = times[ind_locs]
_data = piece.get_values(_time)
_datas.append(_data)
_times.append(_time)
# Only visit these times once:
done_times = np.logical_and(done_times, np.logical_not(ind))
unit = _data[0].units
return np.fromiter(itertools.chain(*[list(datum.rescale(unit).magnitude) for datum in _datas]), dtype=np.float) * unit
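# Hedged usage sketch (assumes `quantities`-style units, as implied by the
# rescale('ms') calls above; values are illustrative only):
#
#   import quantities as pq
#   tr = TracePiecewise(pieces=[
#       TracePieceFunctionFlat(time_window=(0. * pq.ms, 50. * pq.ms), x=0. * pq.mV),
#       TracePieceFunctionLinear(time_window=(50. * pq.ms, 100. * pq.ms),
#                                x0=0. * pq.mV, x1=10. * pq.mV),
#   ])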
|
mikehulluk/morphforge
|
src/morphforge/traces/tracetypes/tracepiecewise.py
|
Python
|
bsd-2-clause
| 5,584
|
[
"VisIt"
] |
d53fa769b94d3012dfb00c4d7e213da139ca362904ed782eebb2696edadcceb6
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 25 20:42:44 2017
@author: Yefee
"""
from .share_constant import *
# sec in sidereal day ~ sec
sday = SHR_CONST_SDAY
# earth rot ~ rad/sec
omega = SHR_CONST_OMEGA
# radius of earth ~ m
rearth = SHR_CONST_REARTH
# acceleration of gravity ~ m/s^2
g = SHR_CONST_G
# Stefan-Boltzmann constant ~ W/m^2/K^4
stebol = SHR_CONST_STEBOL
# Boltzmann's constant ~ J/K/molecule
boltz = SHR_CONST_BOLTZ
# Avogadro's number ~ molecules/kmole
avogad = SHR_CONST_AVOGAD
# Universal gas constant ~ J/K/kmole
rgas = SHR_CONST_RGAS
# molecular weight dry air ~ kg/kmole
mwdair = SHR_CONST_MWDAIR
# molecular weight water vapor
mwwv = SHR_CONST_MWWV
# Dry air gas constant ~ J/K/kg
rdair = SHR_CONST_RDAIR
# Water vapor gas constant ~ J/K/kg
rwv = SHR_CONST_RWV
# RWV/RDAIR - 1.0
zvir = SHR_CONST_ZVIR
# Von Karman constant
karman = SHR_CONST_KARMAN
# freezing T of fresh water ~ K (intentionally made == to TKTRIP)
tkfrz = SHR_CONST_TKFRZ
# triple point of fresh water ~ K
tktrip = SHR_CONST_TKTRIP
# density of dry air at STP ~ kg/m^3
rhoair = SHR_CONST_RHODAIR
# density of fresh water ~ kg/m^3
rhofw = SHR_CONST_RHOFW
# density of sea water ~ kg/m^3
rhosw = SHR_CONST_RHOSW
# density of ice ~ kg/m^3
rhoice = SHR_CONST_RHOICE
# specific heat of dry air ~ J/kg/K
cpdair = SHR_CONST_CPDAIR
# specific heat of fresh h2o ~ J/kg/K
cpfw = SHR_CONST_CPFW
# specific heat of sea h2o ~ J/kg/K
cpsw = SHR_CONST_CPSW
# specific heat of water vap ~ J/kg/K
cpwv = SHR_CONST_CPWV
# specific heat of fresh ice ~ J/kg/K
cpice = SHR_CONST_CPICE
# latent heat of fusion ~ J/kg
latice = SHR_CONST_LATICE
# latent heat of evaporation ~ J/kg
latvap = SHR_CONST_LATVAP
# latent heat of sublimation ~ J/kg
latsub = SHR_CONST_LATSUB
# ocn ref salinity (psu)
ocn_ref_sal = SHR_CONST_OCN_REF_SAL
# ice ref salinity (psu)
ice_ref_sal = SHR_CONST_ICE_REF_SAL
# cappa in atmos
cappa = (SHR_CONST_RGAS/SHR_CONST_MWDAIR)/SHR_CONST_CPDAIR #! R/Cp
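# Hedged example: cappa = R/cp is the exponent in the potential-temperature
# relation theta = T * (p0 / p)**cappa, e.g. for T = 280 K at 850 hPa:
#
#   theta = 280.0 * (100000.0 / 85000.0) ** cappa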
|
Yefee/xcesm
|
xcesm/config/cesmconstant.py
|
Python
|
apache-2.0
| 2,003
|
[
"Avogadro"
] |
b8c648aa605c7719c7f7b700701cd9b38285d421fbfc394980fe31eca8e0504b
|
import numpy as np
import scipy.signal
from scipy.ndimage.morphology import binary_erosion
from scipy.ndimage.morphology import white_tophat
from scipy.ndimage.filters import gaussian_filter
# dictionary describing options available to tune this algorithm
options = {
"best_size":{"purpose":"The estimate of the peak size, in pixels. If 'auto', attempts to determine automatically. Otherwise, this should be an integer.",
"default":"auto"},
"refine_positions":{"purpose":"TODO",
"default":False},
"sensitivity_threshold":{"purpose":"TODO",
"default":0.34},
"start_search":{"purpose":"TODO",
"default":3},
"end_search":{"purpose":"TODO",
"default":"auto"},
"progress_object":{"purpose":"Object used to present a progress bar to the user. For definition, see UI_interface folder.",
"default":None},
}
def normalise_dynamic_range(image):
    image -= image.min()
    image /= image.max()
    return image
def subtract_background(image, filter_width):
    # Assumed name for the orphaned return that previously dangled after
    # normalise_dynamic_range: subtracts a Gaussian-blurred copy of the image
    # to remove a slowly varying background.
    return image - gaussian_filter(image, filter_width)
def get_data_shape(image):
""" Returns data shape as (columns, rows).
Note that this is opposite of standard Numpy/Hyperspy notation. Presumably,
this is to help Lewys keep X and Y in order because Matlab uses column-major indexing.
"""
im_dim = image.shape[::-1]
m, n = im_dim
return m, n
def get_trial_size(image, best_size="auto"):
""" TODO: automatically estimate best box size """
return 19
def get_end_search(image, end_search="auto"):
im_dim = image.shape
if end_search== "auto":
return 2 * np.floor(( float(np.min(im_dim)) / 8) / 2) - 1
else:
return end_search
def fit_block(block, base_axis):
A = np.vstack([base_axis**2 , base_axis , np.ones(base_axis.size)]).T
h_profile = np.sum(block, axis=0)
v_profile = np.sum(block, axis=1)
solution_h = np.linalg.lstsq(A, np.log(h_profile))[0]
solution_v = np.linalg.lstsq(A, np.log(v_profile))[0]
y = -solution_v[1]/solution_v[0]/2.0
x = -solution_h[1]/solution_h[0]/2.0
height = ( h_profile.max() + v_profile.max() ) / 2.0
spread = np.sqrt((np.abs(solution_h[0])+np.abs(solution_v[0])) / 4.0)
return y, x, height, spread
# Feature identification section:
def filter_peaks(normalized_heights, spread, offset_radii, trial_size, sensitivity_threshold):
# Normalise distances and heights:
normalized_heights[normalized_heights < 0] = 0 # Forbid negative (concave) Gaussians.
spread /= trial_size
    spread[spread > np.sqrt(2)] = np.sqrt(2)
    spread[spread == 0] = np.sqrt(2)
offset_radii = offset_radii / trial_size
offset_radii[offset_radii == 0] = 0.001 # Remove zeros values to prevent division error later.
# Create search metric and screen impossible peaks:
search_record = normalized_heights / offset_radii
search_record /= 100.0
search_record[search_record > 1] = 1
search_record[spread < 0.5] = 0 # Invalidates negative Gaussian widths.
search_record[spread > 1] = 0 # Invalidates Gaussian widths greater than a feature spacing.
search_record[offset_radii > 1] = 0 # Invalidates Gaussian widths greater than a feature spacing.
kernel = int(np.round(trial_size/3))
if kernel % 2 == 0:
kernel += 1
search_record = scipy.signal.medfilt2d(search_record, kernel) # Median filter to strip impossibly local false-positive features.
    search_record[search_record < sensitivity_threshold ] = 0 # Collapse improbable features to zero likelihood.
    search_record[search_record >= sensitivity_threshold ] = 1 # Round likelihood of genuine features to unity.
# Erode regions of likely features down to points.
search_record = binary_erosion(search_record, iterations=-1 )
y, x = np.where(search_record==1)
return np.vstack((y,x)).T # Extract the locations of the identified features.
def peak_find(image,
best_size="auto",
refine_positions=False,
sensitivity_threshold=33,
start_search=3,
end_search="auto",
progress_object=None):
"""
Parameters
----------
refine_position : bool
ddf
"""
# TODO: best_size needs its auto-estimation routine
    trial_size = get_trial_size(image, best_size)
# Removes slowly varying background from image to simplify Gaussian fitting.
input_offset = white_tophat(image, 2*trial_size)
# image dimension sizes, used for loop through image pixels
m, n = get_data_shape(image)
big = get_end_search(image, end_search)
# Create blank arrays.
heights = np.empty(image.shape, dtype=np.float32)
spreads = np.empty(image.shape, dtype=np.float32)
xs = np.empty(image.shape, dtype=np.float32)
ys = np.empty(image.shape, dtype=np.float32)
# Half of the trial size, equivalent to the border that will not be inspected.
test_box_padding = int(( trial_size - 1 ) / 2.)
# Coordinate set for X and Y fitting.
base_axis = np.arange(-test_box_padding, test_box_padding+1., dtype=np.float32)
# Followed by the restoration progress bar:
if progress_object is not None:
progress_object.set_title("Identifying Image Peaks...")
progress_object.set_position(0)
for i in range(test_box_padding + 1 , m - ( test_box_padding + 1 )):
currentStrip = input_offset[ i - test_box_padding : i + test_box_padding +1]
for j in range( test_box_padding + 1, n - ( test_box_padding + 1 )):
I = currentStrip[:, j - test_box_padding : j + test_box_padding + 1]
y, x, height, spread = fit_block(I, base_axis)
ys[i, j] = y
xs[i, j] = x
heights[i, j] = height
spreads[i, j] = spread
if progress_object is not None:
percentage_refined = (((trial_size-3.)/2.) / ((big-1.)/2.)) + (((i-test_box_padding) / (m - 2*test_box_padding)) / (((big-1)/2))) # Progress metric when using a looping peak-finding waitbar.
progress_object.set_position(percentage_refined)
# normalize peak heights
heights = heights / ( np.max(input_offset) - np.min(input_offset) )
# normalize fitted Gaussian widths
spreads = spreads / trial_size
offset_radii = np.sqrt(ys**2 + xs**2) # Calculate offset radii.
return filter_peaks(heights, spreads, offset_radii, trial_size, sensitivity_threshold)
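# Hedged usage sketch (note the `options` dict above documents a default
# sensitivity of 0.34 while the signature defaults to 33, so pass it
# explicitly; the random image here is for illustration only):
#
#   import numpy as np
#   img = np.random.rand(256, 256).astype(np.float32)
#   peaks = peak_find(img, sensitivity_threshold=0.34)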
|
AbsoluteIntegrator/Absolute_Integrator
|
Absolute_Integrator/peak_finding/ranger.py
|
Python
|
mit
| 6,616
|
[
"Gaussian"
] |
ea1395bfc041f351d50fad7a3961bbe20ca621936e8afb8c1ef5e414d9139d3d
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
from django.views import defaults as default_views
from gobgift.api.views import FacebookLogin, GoogleLogin
from gobgift.core.views import home, logout, done, app, privacy_policy
from gobgift.groups.views import ListGroupAutocomplete
from gobgift.groups.views import UserAutocomplete
urlpatterns = [
path('', home, name="home"),
path('app/', app, name="app"),
path('login/', home),
path('logout/', logout),
path('done/', done, name='done'),
path('privacy/', privacy_policy, name='privacy'),
path(settings.ADMIN_URL, admin.site.urls),
    path(r'accounts/', include('allauth.urls')),
path(r'lists/', include(('gobgift.wishlists.urls', 'gobgift.wishlists'), namespace='lists')),
path(r'gifts/', include(('gobgift.gifts.urls', 'gobgift.gifts'), namespace='gifts')),
path(r'groups/', include(('gobgift.groups.urls', 'gobgift.groups'), namespace='groups')),
# DjangoRestFramework
path(r'api/', include(('gobgift.api.urls', 'gobgift.api'), namespace='api')),
path(r'api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path(r'rest-auth/', include('rest_auth.urls')),
path(r'rest-auth/facebook/', FacebookLogin.as_view(), name='fb_login'),
path(r'rest-auth/google/', GoogleLogin.as_view(), name='rest_google_login'),
# Django autocomplete light
path(r'user-autocomplete/', UserAutocomplete.as_view(), name="user-autocomplete"),
path(r'listgroup-autocomplete/', ListGroupAutocomplete.as_view(), name="listgroup-autocomplete"),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path('400/', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
path('403/', default_views.permission_denied,
kwargs={'exception': Exception('Permission Denied')}),
path('404/', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
path('500/', default_views.server_error),
path('__debug__/', include(debug_toolbar.urls)),
# url(r'^docs/$', serve, {'document_root': settings.DOCS_ROOT, 'path': 'index.html'}),
# url(r'^docs/(?P<path>.*)$', serve, {'document_root': settings.DOCS_ROOT}),
]
|
kimond/gobgift
|
config/urls.py
|
Python
|
mpl-2.0
| 2,608
|
[
"VisIt"
] |
da2280bc8230d4e469854236373079ccd6814d1c79cf7a668b7c472be4a0b4e4
|
import os
import sys
import setuptools
import shutil
__where__ = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(__where__, 'exfoliate', 'VERSION.txt'), 'rb') as f:
__version__ = f.read().decode('ascii').strip()
# would prefer to just use markdown, but not compatible with setuptools; see the following Github
# issue for more information: https://github.com/pypa/packaging-problems/issues/46
with open(os.path.join(__where__, 'DESCRIPTION.rst'), 'rb') as f:
long_description = f.read().decode('utf-8').strip()
class TestCommand(setuptools.Command):
description = 'Test the exfoliate package with pytest.'
user_options = []
def initialize_options(self): pass
def finalize_options(self): pass
def run(self):
import pytest
exit_code = pytest.main(['tests.py', ])
sys.exit(exit_code)
class PublishCommand(setuptools.Command):
description = 'Build and publish the package.'
user_options = []
def initialize_options(self): pass
def finalize_options(self): pass
def run(self):
try:
print('removing previous builds...')
shutil.rmtree(os.path.join(__where__, 'dist'))
except FileNotFoundError:
pass
print('building source and wheel (universal) distribution...')
os.system(f'{sys.executable} setup.py sdist bdist_wheel --universal')
print('uploading the package to PyPi via Twine...')
os.system('twine upload dist/*')
sys.exit()
class BenchmarkCommand(setuptools.Command):
description = 'Benchmark exfoliate against pure aiohttp.'
user_options = []
def initialize_options(self): pass
def finalize_options(self): pass
def run(self):
os.system(f'{sys.executable} benchmark.py')
sys.exit()
setuptools.setup(
name='exfoliate',
version=__version__,
description='The asynchronous Python HTTP client for developers who prefer synchronous Python.',
long_description=long_description,
author='Brian J Petersen',
author_email='brianjpetersen@gmail.com',
url='https://github.com/brianjpetersen/exfoliate',
#py_modules=['exfoliate', ],
packages=setuptools.find_packages(),
install_requires=['aiohttp>=2.2.5', ],
tests_require=['pytest', 'requests', ],
include_package_data=True,
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
],
cmdclass={
'publish': PublishCommand,
'test': TestCommand,
'benchmark': BenchmarkCommand,
},
)
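# Hedged usage sketch: the custom commands registered in cmdclass above are
# invoked through setup.py:
#
#   python setup.py test
#   python setup.py benchmark
#   python setup.py publish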
|
brianjpetersen/exfoliate
|
setup.py
|
Python
|
mit
| 2,763
|
[
"Brian"
] |
3234f641dd2910980ca5d7de6dc5172ceaa12305cdcd4044364d9180627c80e0
|
#! /bin/env python
import os
import xml.dom.minidom
from collections import namedtuple
from xml.etree.ElementTree import ElementTree
import numpy as np
from six.moves import xrange
from ...grids import UnstructuredField
from .vtk import (
InvalidEncodingError,
InvalidFormatError,
assemble_vtk_elements,
valid_encodings,
valid_formats,
)
from .vtktypes import VtkPolygon, edge_count_to_type, vtk_to_np_type
from .vtkxml import (
VtkAppendedDataElement,
VtkCellDataElement,
VtkCellsElement,
VtkGridElement,
VtkPieceElement,
VtkPointDataElement,
VtkPointsElement,
VtkRootElement,
)
class Error(Exception):
pass
class MissingAttributeError(Error):
pass
class MissingElementError(Error):
pass
def get_vtk_types(grid):
offsets = grid.get_offset()
edge_count = np.diff(offsets)
types = [int(edge_count_to_type.get(offsets[0], VtkPolygon))]
for count in edge_count:
types.append(int(edge_count_to_type.get(count, VtkPolygon)))
return np.array(types, dtype=np.uint8)
def get_vtu_elements(field, data_format="ascii", encoding="ascii"):
if data_format == "appended":
data = VtkAppendedDataElement("", encoding=encoding)
else:
data = None
coords = field.get_coordinate(range(field.get_dim_count()))
types = get_vtk_types(field)
element = {
"VTKFile": VtkRootElement("UnstructuredGrid"),
"Grid": VtkGridElement("UnstructuredGrid"),
"Piece": VtkPieceElement(
NumberOfPoints=field.get_point_count(), NumberOfCells=field.get_cell_count()
),
"Points": VtkPointsElement(coords, append=data, encoding=encoding),
"PointData": VtkPointDataElement(
field.get_point_fields(), append=data, encoding=encoding
),
"Cells": VtkCellsElement(
field.get_connectivity(),
field.get_offset(),
types,
append=data,
encoding=encoding,
),
"CellData": VtkCellDataElement(
field.get_cell_fields(), append=data, encoding=encoding
),
}
if data is not None:
element["AppendedData"] = data
return element
def tofile(field, path, **kwds):
"""Write a field-like object to a VTK file.
Parameters
----------
field : field-like
Field to write to vtk.
path : str
Path to the file to write.
format : {'ascii', 'binary', 'appended'}
Format in which data is stored in the vtk file.
encoding : {'base64', 'raw'}
Encoding method for data in vtk file.
Notes
-----
The *field* object must implement all of the following methods:
* get_point_count
* get_cell_count
* get_connectivity
* get_offset
* get_cell_fields
* get_point_fields
"""
data_format = kwds.get("format", "ascii")
encoding = kwds.get("encoding", "ascii")
if data_format not in valid_formats:
raise InvalidFormatError(data_format)
if encoding not in valid_encodings:
raise InvalidEncodingError(encoding)
if data_format == "ascii":
encoding = "ascii"
doc = xml.dom.minidom.Document()
elements = get_vtu_elements(field, data_format=data_format, encoding=encoding)
doc.appendChild(assemble_vtk_elements(elements))
with open(path, "w") as f:
f.write(doc.toprettyxml())
class IDatabase(object):
def __init__(self):
pass
def open(self, path, var_name):
pass
def write(self, field):
pass
def close(self):
pass
class Database(IDatabase):
def __init__(self):
self._var_name = ""
self._path = ""
self._template = ""
super(Database, self).__init__()
def open(self, path, var_name):
try:
self.close()
(root, ext) = os.path.splitext(path)
self._var_name = var_name
self._path = path
self._template = "%s_%%04d%s" % (root, ext)
except Exception as e:
print("Unable to open database: %s" % e)
def write(self, field, **kwargs):
file_name = self._next_file_name()
tofile(field, file_name, **kwargs)
def _next_file_name(self):
try:
next_file_name = self._template % self._count
except AttributeError:
self._count = 0
next_file_name = self._template % self._count
finally:
self._count += 1
return next_file_name
def close(self):
try:
del self._count
except AttributeError:
pass
class VtkDatabase(object):
def __init__(self, field):
self._field = field
self._count = 0
def tofile(self, path, **kwargs):
(base, filename) = os.path.split(path)
(root, ext) = os.path.splitext(filename)
next_file = "%s_%04d%s" % (root, self._count, ext)
tofile(self._field, os.path.join(base, next_file), **kwargs)
self._count += 1
DataArray = namedtuple("DataArray", ["name", "data"])
Piece = namedtuple("Piece", ["points", "cells", "point_data", "cell_data"])
Point = namedtuple("Point", ["x", "y", "z"])
Cell = namedtuple("Cell", ["connectivity", "offsets", "types"])
def parse_data_array(data_array):
if data_array.tag != "DataArray":
raise ValueError("not a DataArray")
name = data_array.get("Name")
noc = data_array.get("NumberOfComponents", default="1")
data_type = data_array.get("type")
data_format = data_array.get("format", default="ascii")
noc = int(noc)
if data_format != "ascii":
raise ValueError("format is not ascii")
data = [float(val) for val in data_array.text.split()]
array = np.array(data, dtype=vtk_to_np_type[data_type])
components = []
for i in range(noc):
components.append(array[i::noc])
return DataArray(name, components)
def parse_all_data_array(element):
d = {}
for data_array in element.findall("DataArray"):
data = parse_data_array(data_array)
d[data.name] = data.data
return d
def parse_points(points):
if points.tag != "Points":
raise ValueError("not a Points element")
data = parse_data_array(points.find("DataArray"))
n_points = len(data.data[0])
components = data.data
for _ in xrange(3 - len(data.data)):
components.append(np.zeros(n_points))
return Point(components[0], components[1], components[2])
def parse_cells(cells):
if cells.tag != "Cells":
raise ValueError("not a Cells element")
d = parse_all_data_array(cells)
for (key, value) in d.items():
d[key] = value[0]
return Cell(**d)
def parse_piece(piece):
if piece.tag != "Piece":
raise ValueError("not a Piece element")
# cell_count = int(piece.get("NumberOfCells"))
# point_count = int(piece.get("NumberOfPoints"))
points = parse_points(piece.find("Points"))
cells = parse_cells(piece.find("Cells"))
point_data = parse_all_data_array(piece.find("PointData"))
cell_data = parse_all_data_array(piece.find("CellData"))
return Piece(points=points, cells=cells, point_data=point_data, cell_data=cell_data)
def fromfile(source):
"""
Parameters
----------
source: str or file-like
Name of file or file object containing XML data.
Returns
-------
UnstructuredField
The parsed data field.
"""
tree = ElementTree()
tree.parse(source)
root = tree.getroot()
if root.tag != "VTKFile":
raise MissingElementError("Root element must be 'VTKFile'")
data_type = root.get("type")
version = root.get("version")
byte_order = root.get("byte_order")
if data_type is None or version is None or byte_order is None:
raise MissingAttributeError()
# data = root.find ('AppendedData')
# if data is not None:
# appended_data = data.text
# #appended_data = decode(data.text, encoding=data.get('encoding',
# # 'base64'))
grid = root.find(data_type)
piece = parse_piece(grid.find("Piece"))
points = piece.points
cells = piece.cells
field = UnstructuredField(points.x, points.y, cells.connectivity, cells.offsets)
for (key, val) in piece.point_data.items():
field.add_field(key, val, centering="point")
for (key, val) in piece.cell_data.items():
field.add_field(key, val, centering="zonal")
return field
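# Usage sketch (hypothetical file names), round-tripping with the writer above:
#
#     field = fromfile("mesh.vtu")
#     tofile(field, "copy.vtu")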
|
csdms/coupling
|
deprecated/printers/vtk/vtu.py
|
Python
|
mit
| 8,567
|
[
"VTK"
] |
0c57a41352534869a8cf505b46aa9cd2b69848d9b915e496af399cb648bb4118
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyGpaw(PythonPackage):
"""GPAW is a density-functional theory (DFT) Python code based on the
projector-augmented wave (PAW) method and the atomic simulation environment
(ASE)."""
homepage = "https://wiki.fysik.dtu.dk/gpaw/index.html"
url = "https://pypi.io/packages/source/g/gpaw/gpaw-1.3.0.tar.gz"
version('1.3.0', '82e8c80e637696248db00b5713cdffd1')
variant('mpi', default=True, description='Build with MPI support')
variant('scalapack', default=False,
description='Build with ScaLAPACK support')
variant('fftw', default=True, description='Build with FFTW support')
depends_on('mpi', when='+mpi', type=('build', 'link', 'run'))
depends_on('python@2.6:')
depends_on('py-ase@3.13.0:', type=('build', 'run'))
depends_on('py-numpy +blas +lapack', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('libxc')
depends_on('blas')
depends_on('lapack')
depends_on('fftw+mpi', when='+fftw +mpi')
depends_on('fftw~mpi', when='+fftw ~mpi')
depends_on('scalapack', when='+scalapack')
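    # Example spec (sketch): `spack install py-gpaw +mpi +scalapack` selects
    # the MPI build with ScaLAPACK; `py-gpaw ~fftw` would drop FFTW support.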
def patch(self):
spec = self.spec
# For build notes see https://wiki.fysik.dtu.dk/gpaw/install.html
libxc = spec['libxc']
blas = spec['blas']
lapack = spec['lapack']
libs = blas.libs + lapack.libs + libxc.libs
include_dirs = [
blas.prefix.include,
lapack.prefix.include,
libxc.prefix.include
]
if '+mpi' in spec:
libs += spec['mpi'].libs
mpi_include_dirs = repr([spec['mpi'].prefix.include])
mpi_library_dirs = repr(list(spec['mpi'].libs.directories))
include_dirs.append(spec['mpi'].prefix.include)
if '+scalapack' in spec:
libs += spec['scalapack'].libs
include_dirs.append(spec['scalapack'].prefix.include)
scalapack_macros = repr([
('GPAW_NO_UNDERSCORE_CBLACS', '1'),
('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')
])
if '+fftw' in spec:
libs += spec['fftw'].libs
include_dirs.append(spec['fftw'].prefix.include)
lib_dirs = list(libs.directories)
libs = list(libs.names)
rpath_str = ':'.join(self.rpath)
with open('customize.py', 'w') as f:
f.write("libraries = {0}\n".format(repr(libs)))
f.write("include_dirs = {0}\n".format(repr(include_dirs)))
f.write("library_dirs = {0}\n".format(repr(lib_dirs)))
f.write(
"extra_link_args += ['-Wl,-rpath={0}']\n".format(rpath_str)
)
if '+mpi' in spec:
f.write("define_macros += [('PARALLEL', '1')]\n")
f.write("compiler='{0}'\n".format(spec['mpi'].mpicc))
f.write("mpicompiler = '{0}'\n".format(spec['mpi'].mpicc))
f.write("mpi_include_dirs = {0}\n".format(mpi_include_dirs))
f.write("mpi_library_dirs = {0}\n".format(mpi_library_dirs))
else:
f.write("compiler='{0}'\n".format(self.compiler.cc))
f.write("mpicompiler = None\n")
if '+scalapack' in spec:
f.write("scalapack = True\n")
f.write("define_macros += {0}\n".format(scalapack_macros))
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/py-gpaw/package.py
|
Python
|
lgpl-2.1
| 4,617
|
[
"ASE",
"GPAW"
] |
0563ef47ee1053cca39931f3ff53d3435ea9d872ba14bd041b66b5dd34f67f0e
|
import functools
from nose.tools import raises
from unittest import TestCase
from wadl2rst.nodes.base import BaseNode
class TestBaseNode(TestCase):
def setUp(self):
self.single_node = FakeNode(None, "test", {})
self.parent_node = FakeNode(None, "parent", {})
self.child_node = FakeChildNode(self.parent_node, "child", {})
self.parent_node.children.append(self.child_node)
def test_should_remove_child(self):
self.parent_node.remove_child(self.child_node)
        self.assertEqual(0, len(self.parent_node.children))
def test_should_find_first_no_results(self):
result = self.parent_node.find_first("no")
        self.assertEqual(result, None)
def test_should_clone_attributes(self):
clone = self.parent_node.clone()
        self.assertEqual(self.parent_node.name, clone.name)
        self.assertEqual(self.child_node.name, clone.children[0].name)
def test_clone_should_have_different_pointers(self):
clone = self.parent_node.clone()
        self.assertNotEqual(id(self.parent_node), id(clone))
        self.assertNotEqual(id(self.child_node), id(clone.children[0]))
def test_should_find_nodes(self):
nodes = self.parent_node.find("child")
self.assertIn(self.child_node, nodes)
def test_should_find_each_nodes(self):
other_child = FakeChildNode(self.parent_node, "other_child", {})
self.parent_node.children.append(other_child)
nodes = self.parent_node.find_each(["child", "other_child"])
self.assertIn(self.child_node, nodes)
self.assertIn(other_child, nodes)
def test_visitor_should_visit_nodes(self):
nodes = []
func = functools.partial(fake_visit, nodes)
self.parent_node.visit(func)
self.assertIn(self.parent_node, nodes)
self.assertIn(self.child_node, nodes)
def test_find_one_of(self):
actual = self.parent_node.find_one_of(["child"])
        self.assertEqual(actual, self.child_node)
def test_repr(self):
expected = "<FakeNode name='parent' attributes='{}'>"
        self.assertEqual(expected, str(self.parent_node))
@raises(ValueError)
def test_find_one_of_not_exists(self):
self.parent_node.find_one_of(["not_real"])
#
# Fakes for Testing Purposes
#
class FakeNode(BaseNode):
template = "hello{{child_rst}}"
class FakeChildNode(BaseNode):
template = " world"
class FakeAttributeNode(BaseNode):
template = "{{attributes['foo']}}"
def fake_visit(memory, node):
memory.append(node)
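# Rendering sketch: assuming BaseNode's template machinery substitutes
# `child_rst` with the concatenated child output, a parent FakeNode holding
# one FakeChildNode would render to "hello world".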
|
annegentle/wadl2rst
|
test/unit/nodes/base_test.py
|
Python
|
apache-2.0
| 2,571
|
[
"VisIt"
] |
a0a30d89069afe4be9820c1ac89bc4aa1e2b3f0f7f42194048eb37f91618547f
|
#
#@BEGIN LICENSE
#
# PSI4: an ab initio quantum chemistry software package
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#@END LICENSE
#
# Gn theory.
import re
import os
import math
import warnings
import psi4
import p4const
import p4util
from driver import *
#from extend_Molecule import *
from molutil import *
from p4regex import *
# never import aliases into this file
def run_gaussian_2(name, **kwargs):
# throw an exception for open-shells
    if psi4.get_option('SCF', 'REFERENCE') != 'RHF':
raise ValidationError("""g2 computations require "reference rhf".""")
# stash user options:
optstash = p4util.OptionsState(
['FNOCC','COMPUTE_TRIPLES'],
['FNOCC','COMPUTE_MP4_TRIPLES'],
['FREEZE_CORE'],
['SCF','SCF_TYPE'])
# override default scf_type
psi4.set_local_option('SCF','SCF_TYPE','OUT_OF_CORE')
# optimize geometry at scf level
psi4.clean()
psi4.set_global_option('BASIS',"6-31G(D)")
optimize('scf')
psi4.clean()
# scf frequencies for zpe
frequency('scf')
# thermodynamic properties
du = psi4.get_variable('INTERNAL ENERGY CORRECTION')
dh = psi4.get_variable('ENTHALPY CORRECTION')
dg = psi4.get_variable('GIBBS FREE ENERGY CORRECTION')
ref = psi4.wavefunction()
freqs = ref.frequencies()
nfreq = freqs.dim(0)
freqsum = 0.0
    for i in range(nfreq):
freqsum += freqs.get(i)
zpe = freqsum / p4const.psi_hartree2wavenumbers * 0.8929 * 0.5
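    # 0.8929 is the empirical G1/G2 scale factor for HF/6-31G(d) harmonic
    # frequencies; the 0.5 turns the scaled frequency sum into a zero-point
    # energy, and the division converts wavenumbers to hartree.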
psi4.clean()
# optimize geometry at mp2 (no frozen core) level
# note: freeze_core isn't an option in MP2
psi4.set_global_option('FREEZE_CORE',"FALSE")
optimize('conv-mp2')
psi4.clean()
# qcisd(t)
psi4.set_local_option('FNOCC','COMPUTE_MP4_TRIPLES',"TRUE")
psi4.set_global_option('FREEZE_CORE',"TRUE")
psi4.set_global_option('BASIS',"6-311G(D_P)")
run_fnocc('qcisd(t)',**kwargs)
# HLC: high-level correction based on number of valence electrons
ref = psi4.wavefunction()
nirrep = ref.nirrep()
frzcpi = ref.frzcpi()
nfzc = 0
    for i in range(nirrep):
nfzc += frzcpi[i]
nalpha = ref.nalpha() - nfzc
nbeta = ref.nbeta() - nfzc
# hlc of gaussian-2
hlc = -0.00481 * nalpha -0.00019 * nbeta
# hlc of gaussian-1
hlc1 = -0.00614 * nalpha
eqci_6311gdp = psi4.get_variable("QCISD(T) TOTAL ENERGY")
emp4_6311gd = psi4.get_variable("MP4 TOTAL ENERGY")
emp2_6311gd = psi4.get_variable("MP2 TOTAL ENERGY")
psi4.clean()
# correction for diffuse functions
psi4.set_global_option('BASIS',"6-311+G(D_P)")
energy('mp4')
emp4_6311pg_dp = psi4.get_variable("MP4 TOTAL ENERGY")
emp2_6311pg_dp = psi4.get_variable("MP2 TOTAL ENERGY")
psi4.clean()
# correction for polarization functions
psi4.set_global_option('BASIS',"6-311G(2DF_P)")
energy('mp4')
emp4_6311g2dfp = psi4.get_variable("MP4 TOTAL ENERGY")
emp2_6311g2dfp = psi4.get_variable("MP2 TOTAL ENERGY")
psi4.clean()
# big basis mp2
psi4.set_global_option('BASIS',"6-311+G(3DF_2P)")
run_fnocc('_mp2',**kwargs)
emp2_big = psi4.get_variable("MP2 TOTAL ENERGY")
psi4.clean()
eqci = eqci_6311gdp
e_delta_g2 = emp2_big + emp2_6311gd - emp2_6311g2dfp - emp2_6311pg_dp
e_plus = emp4_6311pg_dp - emp4_6311gd
e_2df = emp4_6311g2dfp - emp4_6311gd
eg2 = eqci + e_delta_g2 + e_plus + e_2df
eg2_mp2_0k = eqci + (emp2_big - emp2_6311gd) + hlc + zpe
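    # G2 adds additive basis-set corrections (diffuse, polarization, and a
    # large-basis MP2 "delta") to the QCISD(T)/6-311G(d,p) base energy;
    # G2(MP2) replaces the MP4-based corrections with a single MP2 difference.
    # The HLC and scaled ZPE then complete the 0 K values.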
psi4.print_out('\n')
psi4.print_out(' ==> G1/G2 Energy Components <==\n')
psi4.print_out('\n')
psi4.print_out(' QCISD(T): %20.12lf\n' % eqci)
psi4.print_out(' E(Delta): %20.12lf\n' % e_delta_g2)
psi4.print_out(' E(2DF): %20.12lf\n' % e_2df)
psi4.print_out(' E(+): %20.12lf\n' % e_plus)
psi4.print_out(' E(G1 HLC): %20.12lf\n' % hlc1)
psi4.print_out(' E(G2 HLC): %20.12lf\n' % hlc)
psi4.print_out(' E(ZPE): %20.12lf\n' % zpe)
psi4.print_out('\n')
psi4.print_out(' ==> 0 Kelvin Results <==\n')
psi4.print_out('\n')
eg2_0k = eg2 + zpe + hlc
psi4.print_out(' G1: %20.12lf\n' % (eqci + e_plus + e_2df + hlc1 + zpe))
psi4.print_out(' G2(MP2): %20.12lf\n' % eg2_mp2_0k)
psi4.print_out(' G2: %20.12lf\n' % eg2_0k)
psi4.set_variable("G1 TOTAL ENERGY",eqci + e_plus + e_2df + hlc1 + zpe)
psi4.set_variable("G2 TOTAL ENERGY",eg2_0k)
psi4.set_variable("G2(MP2) TOTAL ENERGY",eg2_mp2_0k)
psi4.print_out('\n')
T = psi4.get_global_option('T')
psi4.print_out(' ==> %3.0lf Kelvin Results <==\n'% T)
psi4.print_out('\n')
internal_energy = eg2_mp2_0k + du - zpe / 0.8929
enthalpy = eg2_mp2_0k + dh - zpe / 0.8929
gibbs = eg2_mp2_0k + dg - zpe / 0.8929
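    # The thermodynamic corrections du/dh/dg include the unscaled harmonic
    # ZPE, while the 0 K energies carry the scaled one, so the unscaled ZPE
    # (zpe / 0.8929) is removed here to avoid double counting.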
psi4.print_out(' G2(MP2) energy: %20.12lf\n' % internal_energy )
psi4.print_out(' G2(MP2) enthalpy: %20.12lf\n' % enthalpy)
psi4.print_out(' G2(MP2) free energy: %20.12lf\n' % gibbs)
psi4.print_out('\n')
psi4.set_variable("G2(MP2) INTERNAL ENERGY",internal_energy)
psi4.set_variable("G2(MP2) ENTHALPY",enthalpy)
psi4.set_variable("G2(MP2) FREE ENERGY",gibbs)
internal_energy = eg2_0k + du - zpe / 0.8929
enthalpy = eg2_0k + dh - zpe / 0.8929
gibbs = eg2_0k + dg - zpe / 0.8929
psi4.print_out(' G2 energy: %20.12lf\n' % internal_energy )
psi4.print_out(' G2 enthalpy: %20.12lf\n' % enthalpy)
psi4.print_out(' G2 free energy: %20.12lf\n' % gibbs)
psi4.set_variable("CURRENT ENERGY",eg2_0k)
psi4.set_variable("G2 INTERNAL ENERGY",internal_energy)
psi4.set_variable("G2 ENTHALPY",enthalpy)
psi4.set_variable("G2 FREE ENERGY",gibbs)
psi4.clean()
optstash.restore()
# return 0K g2 results
return eg2_0k
# aliases for g2
procedures['energy']['gaussian-2'] = run_gaussian_2
procedures['energy']['g2'] = run_gaussian_2
|
spring01/libPSI
|
lib/python/gaussian_n.py
|
Python
|
gpl-2.0
| 6,843
|
[
"Gaussian",
"Psi4"
] |
96ed6a44ebacb70627f1ee5986bbac5064356fd57333db0aed833bd3ea999365
|