| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
import unittest2
import datetime
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from google.appengine.ext.webapp import Response
# from controllers.datafeed_controller import UsfirstEventDetailsGet
from models.event import Event
from models.team import Team
class TestUsfirstEventDetailsGet(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_urlfetch_stub()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
def tearDown(self):
self.testbed.deactivate()
# def test_get(self):
# test with 2011ct
# usfirsteventget = UsfirstEventDetailsGet()
# usfirsteventget.response = Response()
# usfirsteventget.get(2011, "5561")
#
# check event object got created
# event = Event.get_by_id("2011ct")
# self.assertEqual(event.name, "Northeast Utilities FIRST Connecticut Regional")
# self.assertEqual(event.event_type, "Regional")
# self.assertEqual(event.start_date, datetime.datetime(2011, 3, 31, 0, 0))
# self.assertEqual(event.end_date, datetime.datetime(2011, 4, 2, 0, 0))
# self.assertEqual(event.year, 2011)
# self.assertEqual(event.venue_address, "Connecticut Convention Center\r\n100 Columbus Blvd\r\nHartford, CT 06103\r\nUSA")
# self.assertEqual(event.website, "http://www.ctfirst.org/ctr")
# self.assertEqual(event.event_short, "ct")
#
# check team objects get created for missing teams
# frc177 = Team.get_by_id("frc177")
# self.assertEqual(frc177.team_number, 177)
# self.assertEqual(frc177.first_tpid, 41633)
| verycumbersome/the-blue-alliance | tests/test_datafeed_controller.py | Python | mit | 1,879 | ["COLUMBUS"] | 13e113bede23f1f93bfb08045e631d60a0192039ada0d5da3ac27810a8922a6b |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from pts.dustpedia.core.database import DustPediaDatabase, get_account
from pts.dustpedia.core.sample import DustPediaSample
from pts.core.tools import filesystem as fs
from pts.core.basics.log import log
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments
# -----------------------------------------------------------------
# Create configuration
definition = ConfigurationDefinition()
definition.add_required("galaxy_name", "string", "name of the galaxy (will be resolved)")
config = parse_arguments("get_headers", definition)
# -----------------------------------------------------------------
# Create the DustPedia sample
sample = DustPediaSample()
galaxy_name = sample.get_name(config.galaxy_name)
# Create the database
database = DustPediaDatabase()
# Login
username, password = get_account()
database.login(username, password)
# Loop over the images for M81
filters = database.get_image_names_and_filters(galaxy_name)
for name in filters:
# Get the filter
fltr = filters[name]
# Inform the user
log.info("Getting the header for the '" + str(fltr) + "' filter ...")
# Get the header
header = database.get_header(galaxy_name, name)
    # Determine the output path (assuming the current working directory)
    header_path = fs.join(fs.cwd(), str(fltr) + ".txt")
# Save the header
header.totextfile(header_path)
# -----------------------------------------------------------------
| SKIRT/PTS | modeling/tests/M81/get_headers.py | Python | agpl-3.0 | 1,867 | ["Galaxy"] | 4ca513dfbbe302a5d4094dc24ea5a9afa4931a3666cd61f3a7509372d283e135 |
""" Kernel module
Defines basic kernel functions of the form::
funcname(x_p, x_q, par) that return a covariance matrix.
x_p is n1*d, x_q is n2*d, the result should be n1*n2
par can be a scalar, array, or list of these.
All kernels must allow x_q=None, and efficiently compute the diagonal
of K(x_p, x_p) as a (n1,) shaped vector.
Multi-task kernels must begin with ``mt_``, and such kernels must use the
last dimension of x_p and x_q as an indicator of the task.
This file also contains code for composing these kernels into multi-use
objects.
"""
import numpy as np
from scipy.spatial.distance import cdist
from dora.regressors.gp import predict
from dora.regressors.gp.types import Range
from dora.regressors.gp import train
import logging
log = logging.getLogger(__name__)
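# A minimal sketch of the calling convention described in the module
# docstring (hypothetical data; `gaussian` is defined below):
#
#   X = np.random.rand(5, 2)       # 5 points in 2 dimensions
#   K = gaussian(X, X, 0.5)        # full (5, 5) covariance matrix
#   d = gaussian(X, None, 0.5)     # (5,) diagonal of K(X, X)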
def gaussian(x_p, x_q, LS):
# The 'squared exponential' gaussian radial basis function kernel.
# This kernel is known to be smooth, differentiable and stationary.
if x_q is None:
return np.ones(x_p.shape[0])
deltasq = cdist(x_p/LS, x_q/LS, 'sqeuclidean')
value = np.exp(-0.5 * deltasq)
return value
def laplace(x_p, x_q, LS):
    # Exponential of the squared distance scaled by LS (note: despite the
    # name, this is not the absolute-distance Laplace kernel).
    if x_q is None:
return np.ones(x_p.shape[0])
deltasq = cdist(x_p / np.sqrt(LS), x_q / np.sqrt(LS), 'sqeuclidean')
value = np.exp(- deltasq)
return value
def sin(x_p, x_q, params):
# The gaussian-enveloped sinusoidal kernel is good for modeling locally
# oscillating series.
if x_q is None:
return np.ones(x_p.shape[0])
freq, LS = params
deltasq = cdist(x_p/LS, x_q/LS, 'sqeuclidean')
value = np.exp(-0.5 * deltasq)*np.cos(np.sqrt(deltasq))
return value
def matern3on2(x_p, x_q, LS):
# The Matern 3/2 kernel is often used as a less smooth alternative to the
# gaussian kernel for natural data.
if x_q is None:
return np.ones(x_p.shape[0])
r = cdist(x_p/LS, x_q/LS, 'euclidean')
value = (1.0 + r)*np.exp(-r)
return value
def chisquare(x_p, x_q, eps=1e-5):
if x_q is None:
return np.ones(x_p.shape[0])
x_pd = x_p[:, np.newaxis, :]
x_qd = x_q[np.newaxis, :, :]
return 2 * (x_pd * x_qd / (x_pd + x_qd + eps)).sum(axis=-1)
def non_stationary(x_p, x_q, params):
""" Implementation of Paciorek's kernel where length scale is defined as
a continuous function L(x), and computed by operations on L(x1) and L(x2)
Note - we globally apply ARD scaling, then inside the scaled space apply an
isotropic non-stationary treatment according to L(x)
Arguments:
x_p, x_q : n*d x-values
params(list): [ (d,) np array of length-scale multipliers scaling the...,
function L(x) ]
"""
assert(x_p.ndim == 2)
if x_q is None:
        # Just want the magnitude to be evaluated - it's always 1 :)
return np.ones(x_p.shape[0])
LS_mult, LS_func = params # unpack parameters
dims = x_p.shape[1]
ls_p = LS_func(x_p)
ls_q = LS_func(x_q)
if dims > 1:
assert(LS_mult.shape[0] == dims)
assert(len(LS_mult.shape) == 1)
assert(len(ls_p.shape) == 1)
ls_p = ls_p[:, np.newaxis]
ls_q = ls_q[:, np.newaxis]
sig_mult = LS_mult**2
sig_p = ls_p**2
sig_q = ls_q**2
sig_avg = 0.5*(sig_p + sig_q.T)
ls_avg = sig_avg**0.5
# In the diagonal [axis aligned] case, the determinants of the length
# scales are simply their products
det_sig_mult = np.prod(sig_mult)
# todo(AL) can probably remove det_sig_mult altogether as it should cancel
dets_p = det_sig_mult * sig_p**dims
dets_q = det_sig_mult * sig_q**dims
dets_avg = det_sig_mult * sig_avg**dims
# Compute non-stationary kernel
gain = np.sqrt(np.sqrt(dets_p * dets_q.T) / dets_avg)
return gain * np.exp(-cdist(x_p/LS_mult, x_q/LS_mult, 'sqeuclidean')
/ ls_avg**2)
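# Sketch of the parameter format for `non_stationary` above (hypothetical
# values): for 2-d inputs,
#
#   LS_mult = np.array([1.0, 2.0])              # per-dimension ARD multipliers
#   LS_func = lambda x: 0.5 + 0.1 * x[:, 0]     # length-scale field L(x)
#   K = non_stationary(x_p, x_q, [LS_mult, LS_func])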
def tree1D(x_p, x_q, params):
""" Implementation of Paciorek's kernel where length scale is defined as
a continuous function L(x), and computed by operations on L(x1) and L(x2)
Note - we globally apply ARD scaling, then inside the scaled space apply an
isotropic non-stationary treatment according to L(x)
Arguments:
x_p, x_q : n*d x-values
params(list): [ (d,) np array of length-scale multipliers scaling the...,
function L(x) ]
"""
assert(x_p.ndim == 2)
if x_q is None:
        # Just want the magnitude to be evaluated - it's always 1 :)
return np.ones(x_p.shape[0])
LS_mult, LS_func = params # unpack parameters
dims = x_p.shape[1]
ls_p = LS_func(x_p)
ls_q = LS_func(x_q)
if dims > 1:
assert(LS_mult.shape[0] == 1)
assert(len(LS_mult.shape) == 1)
assert(len(ls_p.shape) == 1)
ls_p = ls_p[:, np.newaxis]
ls_q = ls_q[:, np.newaxis]
sig_mult = LS_mult**2
sig_p = ls_p**2
sig_q = ls_q**2
sig_avg = 0.5*(sig_p + sig_q.T)
ls_avg = sig_avg**0.5
# In the diagonal [axis aligned] case, the determinants of the length
# scales are simply their products
det_sig_mult = np.prod(sig_mult)
# todo(AL) can probably remove det_sig_mult altogether as it should cancel
dets_p = det_sig_mult * sig_p**dims
dets_q = det_sig_mult * sig_q**dims
dets_avg = det_sig_mult * sig_avg**dims
# Compute non-stationary kernel
gain = np.sqrt(np.sqrt(dets_p * dets_q.T) / dets_avg)
return gain * np.exp(-cdist(x_p[:, -1][:, np.newaxis]/LS_mult,
x_q[:, -1][:, np.newaxis]/LS_mult, 'sqeuclidean') / ls_avg**2) # NOQA
def tree(x_p, x_q, params):
""" Implementation of Paciorek's kernel where length scale is defined as
a continuous function L(x), and computed by operations on L(x1) and L(x2)
Note - we globally apply ARD scaling, then inside the scaled space apply an
isotropic non-stationary treatment according to L(x)
Arguments:
x_p, x_q : n*d x-values
params(list): [ (d,) np array of length-scale multipliers scaling the...,
function L(x) ]
"""
assert(x_p.ndim==2)
if x_q is None:
        # Just want the magnitude to be evaluated - it's always 1 :)
return np.ones(x_p.shape[0])
LS_mult, LS_func = params # unpack parameters
dims = x_p.shape[1]
ls_p = LS_func(x_p)
ls_q = LS_func(x_q)
if dims > 1:
assert(LS_mult.shape[0] == dims)
assert(len(LS_mult.shape) == 1)
assert(len(ls_p.shape) == 1)
ls_p = ls_p[:, np.newaxis]
ls_q = ls_q[:, np.newaxis]
sig_mult = LS_mult**2
sig_p = ls_p**2
sig_q = ls_q**2
sig_avg = 0.5*(sig_p + sig_q.T)
ls_avg = sig_avg**0.5
# In the diagonal [axis aligned] case, the determinants of the length
# scales are simply their products
det_sig_mult = np.prod(sig_mult)
# todo(AL) can probably remove det_sig_mult altogether as it should cancel
dets_p = det_sig_mult * sig_p**dims
dets_q = det_sig_mult * sig_q**dims
dets_avg = det_sig_mult * sig_avg**dims
# Compute non-stationary kernel
gain = np.sqrt(np.sqrt(dets_p * dets_q.T) / dets_avg)
return gain * np.exp(-cdist(x_p / LS_mult, x_q / LS_mult, 'sqeuclidean')
/ ls_avg**2)
def nonstat_rr(x_p, x_q, params):
    # A non-stationary implementation of Paciorek's kernel
# where the length scale is computed via a ridge regression:
# The hyper-parameters are given by:
# [LS_sigma, LS_mu, LS_noise, LS_x, LS_y0]
# Where LS_x, LS_y0 are the control points of the latent regression
# LS_mu is the prior on the length scale
# LS_sigma is the length scale of the length-scale regression
# LS_noise is the noise level of the length-scale regression
min_ls = 1e-3
assert(x_p.ndim == 2)
dims = x_p.shape[1]
if x_q is None:
        # Just want the magnitude to be evaluated - it's always 1 :)
return np.ones(x_p.shape[0])
# TODO(Al): allow anisotropy
LS_sigma, LS_mu, LS_noise, LS_x, LS_y0 = params
LS_y = LS_y0 - LS_mu
LS_kernel = gaussian # lambda x1, x2: gaussian(x1, x2, LS_sigma)
lsgp = train.condition(LS_x, LS_y, LS_kernel, [LS_sigma, [LS_noise]])
query_p = predict.query(x_p, lsgp)
query_q = predict.query(x_q, lsgp)
ls_p = (min_ls + (LS_mu+predict.mean(lsgp, query_p))**2)[:, np.newaxis]
ls_q = (min_ls + (LS_mu+predict.mean(lsgp, query_q))**2)[:, np.newaxis]
sig_p = ls_p**2
sig_q = ls_q**2
sig_avg = 0.5*(sig_p + sig_q.T)
# ls_avg = sig_avg**0.5
dets_p = sig_p**dims
dets_q = sig_q**dims
dets_avg = sig_avg**dims
# Compute non-stationary kernel
gain = np.sqrt(np.sqrt(dets_p * dets_q.T) / dets_avg)
return gain * np.exp(-cdist(x_p, x_q, 'sqeuclidean') / sig_avg)
def mt_weights(x_p, x_q, params):
# This is an example of a multi-task aware kernel.
# In this case, it is a positive definite per-task weight matrix.
# TODO(Al): more intuitive weight parametrisation
task_p = x_p[:, -1].astype(int)
nTasks = task_p[-1]+1
tri = np.tri(nTasks)
tri[tri > 0] = params
task_weights = np.dot(tri, tri.T)
if x_q is None: # Just the diagonal
return task_weights[task_p, task_p]
else: # Normal
task_q = x_q[:, -1].astype(int)
return task_weights[task_p][:, task_q]
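# Sketch of the parametrisation used by `mt_weights` (hypothetical numbers):
# for 2 tasks, `params` has 3 entries filling the lower triangle of np.tri(2)
# in row-major order, e.g. params = [a, b, c] gives
#
#   L = [[a, 0],    task_weights = L @ L.T = [[a*a,       a*b],
#        [b, c]]                              [a*b, b*b + c*c]]
#
# which is positive semi-definite by construction.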
# The second half of this file deals with the infrastructure for composing
# kernels automatically from functions.
def named_target(covfn, fn_cache):
# Turns a string into a callable function
# also, automatically allows use of non multitask functions
# as multitask functions.
if covfn in fn_cache:
return fn_cache[covfn]
else:
knowns = globals()
if covfn in knowns:
fn = knowns[covfn]
elif covfn[:3] == 'mt_' and covfn[3:] in knowns:
target = knowns[covfn[3:]]
fn = lambda x_p, x_q, par: target(
x_p[:, :-1], x_q[:, :-1] if x_q is not None else None, par)
else:
raise ValueError("No valid target")
# logging.info('Binding %s to %s.' % (covfn, str(fn)))
fn_cache[covfn] = fn
return fn
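# For example (sketch, under the conventions above): named_target('mt_gaussian',
# {}) wraps `gaussian` so the trailing task-indicator column is dropped from
# x_p and x_q before evaluation.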
def compose(user_kernel):
# user_kernel is fn(h,k)
# h - Hyperparameter function (min, max, mid)
# k - Kernel call function (name, hyper, optional_list_of_dimensions)
fn_cache = {}
def thekernel(x1, x2, thetas):
theta_iter = iter(thetas)
return user_kernel(lambda a, b, c=None: next(theta_iter),
lambda k, par, d=None:
(named_target(k, fn_cache)(x1, x2, par)) if d is None else
named_target(k, fn_cache)(x1[:, d],
None if x2 is None else x2[:,d], par))
return thekernel
def auto_range(user_kernel):
mins = []
mids = []
maxs = []
def range_log(min, max, mid=None):
if mid is None:
mid = 0.5*(min+max)
mins.append(min)
mids.append(mid)
maxs.append(max)
return 0.
user_kernel(range_log, lambda k,par,d=None:0.)
return Range(mins, maxs, mids)
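# A minimal composition sketch (hypothetical hyperparameter ranges):
#
#   def my_kernel(h, k):
#       # amplitude * gaussian kernel with a learned length scale
#       return h(1e-2, 1e2) * k('gaussian', h(0.1, 10.))
#
#   kfn = compose(my_kernel)      # kfn(x1, x2, thetas) -> covariance matrix
#   rng = auto_range(my_kernel)   # Range(mins, maxs, mids) over the thetas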
def describer(user_kernel):
def theprinter(thetas):
theta_iter = iter(thetas[0]) # assuming noise hyperparams included
return str(user_kernel(
lambda a, b, c=None: next(theta_iter),
lambda k, par, d=None: Printer(k+'{'+Printer.txt(par)+'} ')))
return theprinter
# TODO(Al): re-implement special case for mt_weights with stub evaluation
# if covfn == 'mt_weights':
# n_tasks = np.floor(np.sqrt(2.0*len(params)))
# placeholder = np.arange(n_tasks)[:,np.newaxis]
# weights = globals()[covfn](placeholder, placeholder, params)
# return Printer(np.array_str(weights).replace('\n', ','))
# Return object for turning a covariance function call into a string.
# Currently supports addition and subtraction
# TODO(Al): tagging of hyperparameters
class Printer:
def __init__(self,val='?'):
self.val = val
def __mul__(a, b):
txta = Printer.txt(a)
txtb = Printer.txt(b)
if '+' in txta:
txta = '('+txta+')'
if '+' in txtb:
txtb = '('+txtb+')'
return Printer(txta+'*'+txtb)
def __str__(self):
return self.val
def __add__(a, b):
return Printer(Printer.txt(a)+'+'+Printer.txt(b))
def __rmul__(b, a):
return Printer.__mul__(a,b)
def __radd__(b, a):
return Printer.__add__(a,b)
@staticmethod
def txt(params):
if type(params) is list:
a = '['
for p in params:
a += Printer.txt(p)+', '
a += ']'
return a
if type(params) == np.ndarray and params.ndim > 1 and params.shape[1] == 1:
params = params.T[0]
if type(params) is float or type(params) is np.float64:
return "{:.3f}".format(params)
else:
return str(params)
| NICTA/dora | dora/regressors/gp/kernel.py | Python | apache-2.0 | 12,947 | ["Gaussian"] | 24c44aa5d695ef1070e6854de2acd1be70f1dbf20af53910d677cd0407e5e5a3 |
"""
The iterative sector decomposition routines.
"""
from .common import Sector, refactorize
from ..algebra import Polynomial, Product
from ..misc import powerset
import numpy as np
# ********************** primary decomposition **********************
def primary_decomposition_polynomial(polynomial, indices=None):
r'''
Perform the primary decomposition on a single polynomial.
.. seealso::
:func:`.primary_decomposition`
:param polynomial:
:class:`.algebra.Polynomial`;
The polynomial to eliminate the Dirac delta from.
:param indices:
iterable of integers or None;
The indices of the parameters to be considered as
integration variables. By default (``indices=None``),
all parameters are considered as integration
variables.
'''
primary_sectors = []
# consider all indices if `indices` is None
if indices is None:
indices = range(polynomial.expolist.shape[1])
coeffs = polynomial.coeffs
expolist = polynomial.expolist
polysymbols = polynomial.polysymbols
if len(indices) == 1:
# do not remove the only Feynman parameter from the `polysymbols`
sectorpoly = polynomial.copy()
sectorpoly.expolist[:,indices[0]] = 0
return [sectorpoly]
else:
for i in indices:
# keep the type (`polynomial` can have a subtype of `Polynomial`)
sectorpoly = polynomial.copy()
# "pinch" (delete) Feynman parameter `i`
# => this is equivalent to setting the exponent to zero
# => that is however equivalent to setting the parameter to one
sectorpoly.expolist = np.delete(expolist,i,axis=1)
sectorpoly.polysymbols = np.delete(polysymbols,i)
sectorpoly.number_of_variables -= 1
primary_sectors.append(sectorpoly)
return primary_sectors
def primary_decomposition(sector, indices=None):
r'''
Perform the primary decomposition as described in
chapter 3.2 (part I) of arXiv:0803.4177v2 [Hei08]_.
Return a list of :class:`.Sector` - the primary
sectors.
For `N` Feynman parameters, there are `N` primary
sectors where the `i`-th Feynman parameter is set to
`1` in sector `i`.
.. seealso::
:func:`.primary_decomposition_polynomial`
:param sector:
:class:`.Sector`;
The container holding the polynomials (typically
:math:`U` and :math:`F`) to eliminate the Dirac
delta from.
:param indices:
iterable of integers or None;
The indices of the parameters to be considered as
integration variables. By default (``indices=None``),
all parameters are considered as integration
variables.
'''
# consider all variables if `indices` is None
# convert `indices` to list otherwise
if indices is None:
N = sector.number_of_variables
indices = range(N)
else:
indices = list(indices)
N = len(indices)
# Must perform `primary_decomposition_polynomial` for each element
# of `sector.other`, `[sector.Jacobian]`, and each of the two factors
# of `sector.cast`.
primary_sector_polynomials_other = [primary_decomposition_polynomial(poly, indices) for poly in sector.other]
primary_sector_polynomials_Jacobian = primary_decomposition_polynomial(sector.Jacobian, indices)
primary_sector_polynomials_cast_factor0 = [primary_decomposition_polynomial(polyprod.factors[0], indices) for polyprod in sector.cast]
primary_sector_polynomials_cast_factor1 = [primary_decomposition_polynomial(polyprod.factors[1], indices) for polyprod in sector.cast]
# Collect the primary decomposed polynomials back into the `Sector` container class
primary_sectors = [
Sector(
[Product(cast0[sector_index],cast1[sector_index]) for cast0,cast1 in zip(primary_sector_polynomials_cast_factor0, primary_sector_polynomials_cast_factor1)],
[other[sector_index] for other in primary_sector_polynomials_other],
primary_sector_polynomials_Jacobian[sector_index]
) for sector_index in range(N)
]
return primary_sectors
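# Illustrative sketch (hypothetical polynomials; real usage goes through the
# pySecDec API, and the constructor calls below are assumptions): for U and F
# in three Feynman parameters,
#
#   U = Polynomial.from_expression('x0 + x1 + x2', ['x0','x1','x2'])
#   F = Polynomial.from_expression('x0*x1 + x1*x2 + x0*x2', ['x0','x1','x2'])
#   primary_sectors = primary_decomposition(Sector([F], other=[U]))
#
# yields three primary sectors; in sector `i`, Feynman parameter `i` is
# pinched (set to one).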
# ********************** iterative decomposition **********************
class EndOfDecomposition(Exception):
'''
This exception is raised if the function
:func:`.iteration_step` is called although
the sector is already in standard form.
'''
pass
def remap_parameters(singular_parameters, Jacobian, *polynomials):
r'''
Remap the Feynman parameters according to eq. (16) of
arXiv:0803.4177v2 [Hei08]_. The parameter whose index comes
    first in `singular_parameters` is kept fixed.
The remapping is done in place; i.e. the `polynomials` are
**NOT** copied.
:param singular_parameters:
list of integers;
The indices :math:`\alpha_r` such that at least one
of `polynomials` becomes zero if all
:math:`t_{\alpha_r} \rightarrow 0`.
:param Jacobian:
:class:`.Polynomial`;
The Jacobian determinant is multiplied to this polynomial.
:param polynomials:
        arbitrarily many instances of :class:`.algebra.Polynomial`
where all of these have an equal number of variables;
The polynomials of Feynman parameters to be remapped. These
are typically :math:`F` and :math:`U`.
Example:
.. code-block:: python
remap_parameters([1,2], Jacobian, F, U)
'''
assert polynomials, "No polynomial for modification passed"
num_parameters = polynomials[0].expolist.shape[1]
for poly in polynomials:
assert num_parameters == poly.expolist.shape[1], 'All polynomials must have the same number of variables'
for poly in polynomials:
for param in singular_parameters[1:]:
poly.expolist[:,singular_parameters[0]] += poly.expolist[:,param] # This modifies in place!
Jacobian.expolist[:,singular_parameters[0]] += len(singular_parameters) - 1
def find_singular_set(sector, indices=None):
'''
Function within the iterative sector decomposition procedure
which heuristically chooses an optimal decomposition set.
The strategy was introduced in arXiv:hep-ph/0004013 [BH00]_
and is described in 4.2.2 of arXiv:1410.7939 [Bor14]_.
Return a list of indices.
:param sector:
:class:`.Sector`;
The sector to be decomposed.
:param indices:
iterable of integers or None;
The indices of the parameters to be considered as
integration variables. By default (``indices=None``),
all parameters are considered as integration
variables.
'''
# consider all indices if `indices` is None
if indices is None:
indices = range(sector.number_of_variables)
def get_poly_to_transform(sector, indices):
'''
Return a :class:`.algebra.Product` in
`sector.cast` that is not in the desired form
`<monomial> * <const + ...>` yet.
Raise `EndOfDecomposition` if the desired form is
reached.
'''
for polyprod in sector.cast:
if not polyprod.factors[1].has_constant_term(indices):
return polyprod
# Control only reaches this point if the desired form is
# already reached for all polynomials in ``sector.cast``.
raise EndOfDecomposition()
    # find a polynomial to cast that is not in the form ``const + ...`` yet
polyprod = get_poly_to_transform(sector, indices)
poly = polyprod.factors[1]
possible_sets = []
    # find sets that nullify the selected polynomial
# only consider sets of the smallest possible size
for singular_set in powerset(indices,min_length=2):
if possible_sets and len(possible_sets[0])<len(singular_set):
break
if poly.becomes_zero_for(singular_set):
possible_sets.append(singular_set)
assert possible_sets
    # First check how many polynomials of the sector nullify for
    # each set of the `possible_sets` of fixed length.
    # Second, only gather those sets which nullify the most polynomials.
howmany_max = 1
howmany = 0
best_sets = []
for singular_set in possible_sets:
for polyprod in sector.cast:
if not polyprod.factors[1].has_constant_term(indices):
howmany += 1
if howmany > howmany_max:
best_sets = []
howmany_max = howmany
best_sets.append(singular_set)
howmany = 0
# Choose the set of Feynman parameters with the
# highest powers for remapping.
exposum_max = -np.inf
for test_set in best_sets:
exposum = poly.expolist[:,test_set].max(axis=0).sum()
if exposum > exposum_max:
exposum_max = exposum
best_set = test_set
assert np.isfinite(exposum_max)
# return the chosen set
return best_set
def iteration_step(sector, indices=None):
'''
Run a single step of the iterative sector decomposition as described
in chapter 3.2 (part II) of arXiv:0803.4177v2 [Hei08]_.
Return an iterator of :class:`.Sector` - the arising subsectors.
:param sector:
:class:`.Sector`;
The sector to be decomposed.
:param indices:
iterable of integers or None;
The indices of the parameters to be considered as
integration variables. By default (``indices=None``),
all parameters are considered as integration
variables.
'''
# consider all indices if `indices` is None
if indices is None:
indices = range(sector.number_of_variables)
# find a set that describes the transformation to be performed
singular_set = find_singular_set(sector, indices)
# We have to generate a subsector for each Feynman parameter
# that appears in `singular_set`.
# In order to comply with `remap_parameters`, we create
# `len(singular_set)` copies of `singular_set` such that
# each Feynman parameter is in the first position exactly
# once.
subsector_defining_singular_sets = [list(singular_set) for item in singular_set]
for i,item in enumerate(subsector_defining_singular_sets):
# swap the leading and the i-th item
item[0],item[i] = item[i],item[0]
# Call `remap_parameters` for each arising subsector.
for singular_set in subsector_defining_singular_sets:
subsector = sector.copy()
polynomials_to_transform = [polyprod.factors[0] for polyprod in subsector.cast] + \
[polyprod.factors[1] for polyprod in subsector.cast] + \
subsector.other + [subsector.Jacobian]
remap_parameters(singular_set, subsector.Jacobian, *polynomials_to_transform)
for polyprod in subsector.cast:
refactorize(polyprod,singular_set[0])
yield subsector
def iterative_decomposition(sector, indices=None):
'''
Run the iterative sector decomposition as described
in chapter 3.2 (part II) of arXiv:0803.4177v2 [Hei08]_.
Return an iterator of :class:`.Sector` - the
arising subsectors.
:param sector:
:class:`.Sector`;
The sector to be decomposed.
:param indices:
iterable of integers or None;
The indices of the parameters to be considered as
integration variables. By default (``indices=None``),
all parameters are considered as integration
variables.
'''
# convert `indices` to list if not None
if indices is not None:
indices = list(indices)
try:
subsectors = iteration_step(sector, indices) # only this line can raise `EndOfDecomposition`
for subsector in subsectors:
for deeper_subsector in iterative_decomposition(subsector, indices):
yield deeper_subsector
except EndOfDecomposition:
yield sector
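# Sketch of driving the full decomposition (continuing the hypothetical
# example above):
#
#   for primary_sector in primary_sectors:
#       for subsector in iterative_decomposition(primary_sector):
#           ...  # every polynomial in `subsector.cast` is now of the form
#                # <monomial> * <const + ...>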
| mppmu/secdec | pySecDec/decomposition/iterative.py | Python | gpl-3.0 | 11,981 | ["DIRAC"] | 63ad6dc787f182040a9000337bf66d2bef324460a753d1f079e09fdedcd49625 |
from edc.subject.registration.models import RegisteredSubject
from edc.subject.rule_groups.classes import (RuleGroup, site_rule_groups,
Logic, RequisitionRule)
# from .models import MaternalConsent
def func_maternal_lab(visit_instance):
visit = ['1000M']
# maternal_id = MaternalConsent.objects.get(subject_identifier=visit_instance.registered_subject.subject_identifier)
if visit_instance.appointment.visit_definition.code in visit:
return True
return False
class MaternalLabRuleGroup(RuleGroup):
"""Ensures a Viral Load blood draw requisition for the right visits"""
ctrl_vl = RequisitionRule(
logic=Logic(
predicate=func_maternal_lab,
consequence='new',
alternative='none'),
target_model=[('eit_lab', 'maternalrequisition')],
target_requisition_panels=['Viral Load', ], )
"""Ensures a CD4 blood draw requisition for the right visits"""
ctrl_cd4 = RequisitionRule(
logic=Logic(
predicate=func_maternal_lab,
consequence='new',
alternative='none'),
target_model=[('eit_lab', 'maternalrequisition')],
target_requisition_panels=['CD4 (ARV)', ], )
"""Ensures a PBMC blood draw requisition for the right visits"""
ctrl_pbmc = RequisitionRule(
logic=Logic(
predicate=func_maternal_lab,
consequence='new',
alternative='none'),
target_model=[('eit_lab', 'maternalrequisition')],
target_requisition_panels=['PBMC Plasma (STORE ONLY)', ], )
class Meta:
app_label = 'eit_maternal'
source_fk = None
source_model = RegisteredSubject
site_rule_groups.register(MaternalLabRuleGroup)
| botswana-harvard/edc-bhp074 | bhp074/apps/eit_maternal/rule_groups.py | Python | gpl-2.0 | 1,782 | ["VisIt"] | 3f1a089fd4a01ce900d82a3c96e60fc6f6e515d2ecb4bab300de47a123a9d81b |
import warnings
from typing import Callable, Dict, List, Optional, Union
from torch import nn, Tensor
from torchvision.ops import misc as misc_nn_ops
from torchvision.ops.feature_pyramid_network import ExtraFPNBlock, FeaturePyramidNetwork, LastLevelMaxPool
from .. import mobilenet, resnet
from .._utils import IntermediateLayerGetter
class BackboneWithFPN(nn.Module):
"""
Adds a FPN on top of a model.
Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
extract a submodel that returns the feature maps specified in return_layers.
The same limitations of IntermediateLayerGetter apply here.
Args:
backbone (nn.Module)
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
in_channels_list (List[int]): number of channels for each feature map
that is returned, in the order they are present in the OrderedDict
out_channels (int): number of channels in the FPN.
Attributes:
out_channels (int): the number of channels in the FPN
"""
def __init__(
self,
backbone: nn.Module,
return_layers: Dict[str, str],
in_channels_list: List[int],
out_channels: int,
extra_blocks: Optional[ExtraFPNBlock] = None,
) -> None:
super().__init__()
if extra_blocks is None:
extra_blocks = LastLevelMaxPool()
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.fpn = FeaturePyramidNetwork(
in_channels_list=in_channels_list,
out_channels=out_channels,
extra_blocks=extra_blocks,
)
self.out_channels = out_channels
def forward(self, x: Tensor) -> Dict[str, Tensor]:
x = self.body(x)
x = self.fpn(x)
return x
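# Minimal usage sketch (hypothetical layer names and channel counts for a
# ResNet-style body):
#
#   import torch
#   body = resnet.resnet50(pretrained=False)
#   backbone_fpn = BackboneWithFPN(
#       body,
#       return_layers={"layer1": "0", "layer2": "1"},
#       in_channels_list=[256, 512],
#       out_channels=256,
#   )
#   feats = backbone_fpn(torch.rand(1, 3, 64, 64))  # dict of feature maps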
def resnet_fpn_backbone(
backbone_name: str,
pretrained: bool,
norm_layer: Callable[..., nn.Module] = misc_nn_ops.FrozenBatchNorm2d,
trainable_layers: int = 3,
returned_layers: Optional[List[int]] = None,
extra_blocks: Optional[ExtraFPNBlock] = None,
) -> BackboneWithFPN:
"""
Constructs a specified ResNet backbone with FPN on top. Freezes the specified number of layers in the backbone.
Examples::
>>> from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
>>> backbone = resnet_fpn_backbone('resnet50', pretrained=True, trainable_layers=3)
>>> # get some dummy image
>>> x = torch.rand(1,3,64,64)
>>> # compute the output
>>> output = backbone(x)
>>> print([(k, v.shape) for k, v in output.items()])
>>> # returns
>>> [('0', torch.Size([1, 256, 16, 16])),
>>> ('1', torch.Size([1, 256, 8, 8])),
>>> ('2', torch.Size([1, 256, 4, 4])),
>>> ('3', torch.Size([1, 256, 2, 2])),
>>> ('pool', torch.Size([1, 256, 1, 1]))]
Args:
backbone_name (string): resnet architecture. Possible values are 'resnet18', 'resnet34', 'resnet50',
'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'
pretrained (bool): If True, returns a model with backbone pre-trained on Imagenet
norm_layer (callable): it is recommended to use the default value. For details visit:
(https://github.com/facebookresearch/maskrcnn-benchmark/issues/267)
trainable_layers (int): number of trainable (not frozen) resnet layers starting from final block.
Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.
returned_layers (list of int): The layers of the network to return. Each entry must be in ``[1, 4]``.
By default all layers are returned.
extra_blocks (ExtraFPNBlock or None): if provided, extra operations will
be performed. It is expected to take the fpn features, the original
features and the names of the original features as input, and returns
a new list of feature maps and their corresponding names. By
default a ``LastLevelMaxPool`` is used.
"""
backbone = resnet.__dict__[backbone_name](pretrained=pretrained, norm_layer=norm_layer)
return _resnet_fpn_extractor(backbone, trainable_layers, returned_layers, extra_blocks)
def _resnet_fpn_extractor(
backbone: resnet.ResNet,
trainable_layers: int,
returned_layers: Optional[List[int]] = None,
extra_blocks: Optional[ExtraFPNBlock] = None,
) -> BackboneWithFPN:
    # select layers that won't be frozen
if trainable_layers < 0 or trainable_layers > 5:
raise ValueError(f"Trainable layers should be in the range [0,5], got {trainable_layers}")
layers_to_train = ["layer4", "layer3", "layer2", "layer1", "conv1"][:trainable_layers]
if trainable_layers == 5:
layers_to_train.append("bn1")
for name, parameter in backbone.named_parameters():
if all([not name.startswith(layer) for layer in layers_to_train]):
parameter.requires_grad_(False)
if extra_blocks is None:
extra_blocks = LastLevelMaxPool()
if returned_layers is None:
returned_layers = [1, 2, 3, 4]
if min(returned_layers) <= 0 or max(returned_layers) >= 5:
raise ValueError(f"Each returned layer should be in the range [1,4]. Got {returned_layers}")
return_layers = {f"layer{k}": str(v) for v, k in enumerate(returned_layers)}
in_channels_stage2 = backbone.inplanes // 8
in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers]
out_channels = 256
return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks)
def _validate_trainable_layers(
pretrained: bool,
trainable_backbone_layers: Optional[int],
max_value: int,
default_value: int,
) -> int:
# don't freeze any layers if pretrained model or backbone is not used
if not pretrained:
if trainable_backbone_layers is not None:
warnings.warn(
"Changing trainable_backbone_layers has not effect if "
"neither pretrained nor pretrained_backbone have been set to True, "
f"falling back to trainable_backbone_layers={max_value} so that all layers are trainable"
)
trainable_backbone_layers = max_value
# by default freeze first blocks
if trainable_backbone_layers is None:
trainable_backbone_layers = default_value
if trainable_backbone_layers < 0 or trainable_backbone_layers > max_value:
raise ValueError(
f"Trainable backbone layers should be in the range [0,{max_value}], got {trainable_backbone_layers} "
)
return trainable_backbone_layers
def mobilenet_backbone(
backbone_name: str,
pretrained: bool,
fpn: bool,
norm_layer: Callable[..., nn.Module] = misc_nn_ops.FrozenBatchNorm2d,
trainable_layers: int = 2,
returned_layers: Optional[List[int]] = None,
extra_blocks: Optional[ExtraFPNBlock] = None,
) -> nn.Module:
backbone = mobilenet.__dict__[backbone_name](pretrained=pretrained, norm_layer=norm_layer)
return _mobilenet_extractor(backbone, fpn, trainable_layers, returned_layers, extra_blocks)
def _mobilenet_extractor(
backbone: Union[mobilenet.MobileNetV2, mobilenet.MobileNetV3],
fpn: bool,
trainable_layers: int,
returned_layers: Optional[List[int]] = None,
extra_blocks: Optional[ExtraFPNBlock] = None,
) -> nn.Module:
backbone = backbone.features
# Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
# The first and last blocks are always included because they are the C0 (conv1) and Cn.
stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
num_stages = len(stage_indices)
    # find the index of the layer from which we won't freeze
if trainable_layers < 0 or trainable_layers > num_stages:
raise ValueError(f"Trainable layers should be in the range [0,{num_stages}], got {trainable_layers} ")
freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers]
for b in backbone[:freeze_before]:
for parameter in b.parameters():
parameter.requires_grad_(False)
out_channels = 256
if fpn:
if extra_blocks is None:
extra_blocks = LastLevelMaxPool()
if returned_layers is None:
returned_layers = [num_stages - 2, num_stages - 1]
if min(returned_layers) < 0 or max(returned_layers) >= num_stages:
raise ValueError(f"Each returned layer should be in the range [0,{num_stages - 1}], got {returned_layers} ")
return_layers = {f"{stage_indices[k]}": str(v) for v, k in enumerate(returned_layers)}
in_channels_list = [backbone[stage_indices[i]].out_channels for i in returned_layers]
return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks)
else:
m = nn.Sequential(
backbone,
# depthwise linear combination of channels to reduce their size
nn.Conv2d(backbone[-1].out_channels, out_channels, 1),
)
m.out_channels = out_channels # type: ignore[assignment]
return m
| pytorch/vision | torchvision/models/detection/backbone_utils.py | Python | bsd-3-clause | 9,593 | ["VisIt"] | e669f7ca5435190461a7cd4819de57f1a92d80ff96bc346aca1cbea9465c3571 |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys, os
import numpy as np
import numpy.testing as npt
import scipy.spatial.distance
from unittest import skipIf
from mdtraj.utils.six.moves import xrange
from msmbuilder import metrics
from common import load_traj
class TestRMSD():
"Test the msmbuilder.metrics.RMSD module"
def setup(self):
self.traj = load_traj()
self.n_frames = self.traj.n_frames
self.n_atoms = self.traj.n_atoms
# RMSD from frame 0 to other frames
self.target = np.array([0,0,0.63297522])
def test_prepare(self):
rmsds = [metrics.RMSD(), # all atom indices
metrics.RMSD(range(self.n_atoms)),
metrics.RMSD(xrange(self.n_atoms)),
metrics.RMSD(np.arange(self.n_atoms))]
for metric in rmsds:
ptraj = metric.prepare_trajectory(self.traj)
def test_one_to_all(self):
for rmsd in [metrics.RMSD(), metrics.RMSD(omp_parallel=False)]:
ptraj = rmsd.prepare_trajectory(self.traj)
d0 = rmsd.one_to_all(ptraj, ptraj, 0)
npt.assert_array_almost_equal(d0, self.target)
def test_one_to_many(self):
for rmsd in [metrics.RMSD(), metrics.RMSD(omp_parallel=False)]:
ptraj = rmsd.prepare_trajectory(self.traj)
for i in range(self.n_frames):
di = rmsd.one_to_many(ptraj, ptraj, 0, [i])
npt.assert_approx_equal(self.target[i], di)
def test_all_pairwise(self):
        sys.stderr = open(os.devnull, 'w')  # silence progress output; must be opened writable
for rmsd in [metrics.RMSD(), metrics.RMSD(omp_parallel=False)]:
ptraj = rmsd.prepare_trajectory(self.traj)
d1 = rmsd.all_pairwise(ptraj)
target = [ 0., 0.63297522, 0.63297522]
npt.assert_array_almost_equal(d1, target)
        sys.stderr = sys.__stderr__
| msmbuilder/msmbuilder-legacy | tests/test_rmsd.py | Python | gpl-2.0 | 1,970 | ["MDTraj"] | 0fe3e240a87bd10364ca1abf7ac09d7c08c9af0d7d38875f3572e47d10170359 |
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/384')
from data_384 import Fmat_original
# Returns mu,sigma for 15 hidden-states from feature-vectors(121,35) for RF,SF,RM,SM models
def feature_to_mu_sigma(fvec):
index = 0
m,n = np.shape(fvec)
#print m,n
mu = np.matrix(np.zeros((15,1)))
sigma = np.matrix(np.zeros((15,1)))
DIVS = m/15
while (index < 15):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),0:]
#if index == 1:
#print temp_fvec
mu[index] = scp.mean(temp_fvec)
sigma[index] = scp.std(temp_fvec)
index = index+1
return mu,sigma
# Returns sequence given raw data
def create_seq(fvec):
m,n = np.shape(fvec)
#print m,n
seq = np.matrix(np.zeros((15,n)))
DIVS = m/15
for i in range(n):
index = 0
while (index < 15):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),i]
#if index == 1:
#print temp_fvec
seq[index,i] = scp.mean(temp_fvec)
index = index+1
return seq
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
#print " "
#print 'Total_Matrix_Shape:',m_tot,n_tot
mu_rf,sigma_rf = feature_to_mu_sigma(Fmat[121:242,0:35])
mu_rm,sigma_rm = feature_to_mu_sigma(Fmat[121:242,35:70])
mu_sf,sigma_sf = feature_to_mu_sigma(Fmat[121:242,70:105])
mu_sm,sigma_sm = feature_to_mu_sigma(Fmat[121:242,105:140])
mu_obj1,sigma_obj1 = feature_to_mu_sigma(Fmat[121:242,140:141])
mu_obj2,sigma_obj2 = feature_to_mu_sigma(Fmat[121:242,141:142])
#print [mu_rf, sigma_rf]
# HMM - Implementation:
    # 15 Hidden States
    # Max. Force (not now), Contact Area (for now), and Contact Motion (not now) as continuous Gaussian observations from each hidden state
    # Four HMM models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
    # Transition probabilities obtained as an upper-diagonal matrix (to be trained using Baum-Welch)
    # A new object is classified according to the model it represents most closely.
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf = np.zeros((15,2))
B_rm = np.zeros((15,2))
B_sf = np.zeros((15,2))
B_sm = np.zeros((15,2))
for num_states in range(15):
B_rf[num_states,0] = mu_rf[num_states]
B_rf[num_states,1] = sigma_rf[num_states]
B_rm[num_states,0] = mu_rm[num_states]
B_rm[num_states,1] = sigma_rm[num_states]
B_sf[num_states,0] = mu_sf[num_states]
B_sf[num_states,1] = sigma_sf[num_states]
B_sm[num_states,0] = mu_sm[num_states]
B_sm[num_states,1] = sigma_sm[num_states]
B_rf = B_rf.tolist()
B_rm = B_rm.tolist()
B_sf = B_sf.tolist()
B_sm = B_sm.tolist()
# pi - initial probabilities per state
pi = [1.0/15.0] * 15
# generate RF, RM, SF, SM models from parameters
model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained
model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained
model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained
model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained
trial_number = 1
rf_final = np.matrix(np.zeros((28,1)))
rm_final = np.matrix(np.zeros((28,1)))
sf_final = np.matrix(np.zeros((28,1)))
sm_final = np.matrix(np.zeros((28,1)))
while (trial_number < 6):
# For Training
total_seq = Fmat[121:242,:]
m_total, n_total = np.shape(total_seq)
#print 'Total_Sequence_Shape:', m_total, n_total
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[0:121,1:5]
total_seq_rm = total_seq[0:121,36:40]
total_seq_sf = total_seq[0:121,71:75]
total_seq_sm = total_seq[0:121,106:110]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+1:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+36:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+71:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+106:j+110]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = np.column_stack((total_seq[0:121,0],total_seq[0:121,2:5]))
total_seq_rm = np.column_stack((total_seq[0:121,35],total_seq[0:121,37:40]))
total_seq_sf = np.column_stack((total_seq[0:121,70],total_seq[0:121,72:75]))
total_seq_sm = np.column_stack((total_seq[0:121,105],total_seq[0:121,107:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+0],total_seq[0:121,j+2:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35],total_seq[0:121,j+37:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70],total_seq[0:121,j+72:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105],total_seq[0:121,j+107:j+110]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = np.column_stack((total_seq[0:121,0:2],total_seq[0:121,3:5]))
total_seq_rm = np.column_stack((total_seq[0:121,35:37],total_seq[0:121,38:40]))
total_seq_sf = np.column_stack((total_seq[0:121,70:72],total_seq[0:121,73:75]))
total_seq_sm = np.column_stack((total_seq[0:121,105:107],total_seq[0:121,108:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+0:j+2],total_seq[0:121,j+3:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35:j+37],total_seq[0:121,j+38:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70:j+72],total_seq[0:121,j+73:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105:j+107],total_seq[0:121,j+108:j+110]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = np.column_stack((total_seq[0:121,0:3],total_seq[0:121,4:5]))
total_seq_rm = np.column_stack((total_seq[0:121,35:38],total_seq[0:121,39:40]))
total_seq_sf = np.column_stack((total_seq[0:121,70:73],total_seq[0:121,74:75]))
total_seq_sm = np.column_stack((total_seq[0:121,105:108],total_seq[0:121,109:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+0:j+3],total_seq[0:121,j+4:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35:j+38],total_seq[0:121,j+39:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70:j+73],total_seq[0:121,j+74:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105:j+108],total_seq[0:121,j+109:j+110]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[0:121,0:4]
total_seq_rm = total_seq[0:121,35:39]
total_seq_sf = total_seq[0:121,70:74]
total_seq_sm = total_seq[0:121,105:109]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+0:j+4]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35:j+39]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70:j+74]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105:j+109]))
j = j+5
train_seq_rf = (np.array(total_seq_rf).T).tolist()
train_seq_rm = (np.array(total_seq_rm).T).tolist()
train_seq_sf = (np.array(total_seq_sf).T).tolist()
train_seq_sm = (np.array(total_seq_sm).T).tolist()
#print train_seq_rf
final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
model_rf.baumWelch(final_ts_rf)
model_rm.baumWelch(final_ts_rm)
model_sf.baumWelch(final_ts_sf)
model_sm.baumWelch(final_ts_sm)
# For Testing
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[0:121,0]
total_seq_rm = total_seq[0:121,35]
total_seq_sf = total_seq[0:121,70]
total_seq_sm = total_seq[0:121,105]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+35]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+70]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+105]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = total_seq[0:121,1]
total_seq_rm = total_seq[0:121,36]
total_seq_sf = total_seq[0:121,71]
total_seq_sm = total_seq[0:121,106]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+1]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+36]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+71]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+106]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = total_seq[0:121,2]
total_seq_rm = total_seq[0:121,37]
total_seq_sf = total_seq[0:121,72]
total_seq_sm = total_seq[0:121,107]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+2]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+37]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+72]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+107]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = total_seq[0:121,3]
total_seq_rm = total_seq[0:121,38]
total_seq_sf = total_seq[0:121,73]
total_seq_sm = total_seq[0:121,108]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+3]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+38]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+73]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+108]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[0:121,4]
total_seq_rm = total_seq[0:121,39]
total_seq_sf = total_seq[0:121,74]
total_seq_sm = total_seq[0:121,109]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:121,j+4]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:121,j+39]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:121,j+74]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:121,j+109]))
j = j+5
total_seq_obj = np.matrix(np.column_stack((total_seq_rf,total_seq_rm,total_seq_sf,total_seq_sm)))
rf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
rm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
k = 0
while (k < np.size(total_seq_obj,1)):
test_seq_obj = (np.array(total_seq_obj[0:121,k]).T).tolist()
new_test_seq_obj = np.array(sum(test_seq_obj,[]))
ts_obj = new_test_seq_obj
final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist())
# Find Viterbi Path
path_rf_obj = model_rf.viterbi(final_ts_obj)
path_rm_obj = model_rm.viterbi(final_ts_obj)
path_sf_obj = model_sf.viterbi(final_ts_obj)
path_sm_obj = model_sm.viterbi(final_ts_obj)
obj = max(path_rf_obj[1],path_rm_obj[1],path_sf_obj[1],path_sm_obj[1])
if obj == path_rf_obj[1]:
rf[0,k] = 1
elif obj == path_rm_obj[1]:
rm[0,k] = 1
elif obj == path_sf_obj[1]:
sf[0,k] = 1
else:
sm[0,k] = 1
k = k+1
#print rf.T
rf_final = rf_final + rf.T
rm_final = rm_final + rm.T
sf_final = sf_final + sf.T
sm_final = sm_final + sm.T
trial_number = trial_number + 1
#print rf_final
#print rm_final
#print sf_final
#print sm_final
# Confusion Matrix
cmat = np.zeros((4,4))
arrsum_rf = np.zeros((4,1))
arrsum_rm = np.zeros((4,1))
arrsum_sf = np.zeros((4,1))
arrsum_sm = np.zeros((4,1))
k = 7
i = 0
while (k < 29):
arrsum_rf[i] = np.sum(rf_final[k-7:k,0])
arrsum_rm[i] = np.sum(rm_final[k-7:k,0])
arrsum_sf[i] = np.sum(sf_final[k-7:k,0])
arrsum_sm[i] = np.sum(sm_final[k-7:k,0])
i = i+1
k = k+7
i=0
while (i < 4):
j=0
while (j < 4):
if (i == 0):
cmat[i][j] = arrsum_rf[j]
elif (i == 1):
cmat[i][j] = arrsum_rm[j]
elif (i == 2):
cmat[i][j] = arrsum_sf[j]
else:
cmat[i][j] = arrsum_sm[j]
j = j+1
i = i+1
#print cmat
# Plot Confusion Matrix
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
j = 0
while (j < 4):
pp.text(j+0.5,3.5-i,cmat[i][j])
j = j+1
i = i+1
pp.show()
| tapomayukh/projects_in_python | classification/Classification_with_HMM/Single_Contact_Classification/area_codes/number_of_states/hmm_crossvalidation_area_15_states.py | Python | mit | 17,008 | ["Gaussian", "Mayavi"] | b7db6493e38ffee9c3b0fc7e7773c203fbf863ddf63f7b6ef7aff5a2a91ad3f2 |
# -*- coding: utf-8 -*-
"""
sphinx.writers.manpage
~~~~~~~~~~~~~~~~~~~~~~
Manual page writer, extended for Sphinx custom nodes.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import warnings
from docutils import nodes
from docutils.writers.manpage import (
MACRO_DEF,
Writer,
Translator as BaseTranslator
)
from sphinx import addnodes
from sphinx.deprecation import RemovedInSphinx16Warning
from sphinx.locale import admonitionlabels, _
from sphinx.util.compat import docutils_version
from sphinx.util.i18n import format_date
class ManualPageWriter(Writer):
def __init__(self, builder):
Writer.__init__(self)
self.builder = builder
self.translator_class = (
self.builder.translator_class or ManualPageTranslator)
def translate(self):
transform = NestedInlineTransform(self.document)
transform.apply()
visitor = self.translator_class(self.builder, self.document)
self.visitor = visitor
self.document.walkabout(visitor)
self.output = visitor.astext()
class NestedInlineTransform(object):
"""
Flatten nested inline nodes:
Before:
<strong>foo=<emphasis>1</emphasis>
&bar=<emphasis>2</emphasis></strong>
After:
<strong>foo=</strong><emphasis>var</emphasis>
<strong>&bar=</strong><emphasis>2</emphasis>
"""
def __init__(self, document):
self.document = document
def apply(self):
def is_inline(node):
return isinstance(node, (nodes.literal, nodes.emphasis, nodes.strong))
for node in self.document.traverse(is_inline):
if any(is_inline(subnode) for subnode in node):
pos = node.parent.index(node)
for subnode in reversed(node[1:]):
node.remove(subnode)
if is_inline(subnode):
node.parent.insert(pos + 1, subnode)
else:
newnode = node.__class__('', subnode, **node.attributes)
node.parent.insert(pos + 1, newnode)
class ManualPageTranslator(BaseTranslator):
"""
Custom translator.
"""
def __init__(self, builder, *args, **kwds):
BaseTranslator.__init__(self, *args, **kwds)
self.builder = builder
self.in_productionlist = 0
# first title is the manpage title
self.section_level = -1
# docinfo set by man_pages config value
self._docinfo['title'] = self.document.settings.title
self._docinfo['subtitle'] = self.document.settings.subtitle
if self.document.settings.authors:
# don't set it if no author given
self._docinfo['author'] = self.document.settings.authors
self._docinfo['manual_section'] = self.document.settings.section
# docinfo set by other config values
self._docinfo['title_upper'] = self._docinfo['title'].upper()
if builder.config.today:
self._docinfo['date'] = builder.config.today
else:
self._docinfo['date'] = format_date(builder.config.today_fmt or _('%b %d, %Y'),
language=builder.config.language)
self._docinfo['copyright'] = builder.config.copyright
self._docinfo['version'] = builder.config.version
self._docinfo['manual_group'] = builder.config.project
# In docutils < 0.11 self.append_header() was never called
if docutils_version < (0, 11):
self.body.append(MACRO_DEF)
# Overwrite admonition label translations with our own
for label, translation in admonitionlabels.items():
self.language.labels[label] = self.deunicode(translation)
# overwritten -- added quotes around all .TH arguments
def header(self):
tmpl = (".TH \"%(title_upper)s\" \"%(manual_section)s\""
" \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
".SH NAME\n"
"%(title)s \- %(subtitle)s\n")
return tmpl % self._docinfo
def visit_start_of_file(self, node):
pass
def depart_start_of_file(self, node):
pass
def visit_desc(self, node):
self.visit_definition_list(node)
def depart_desc(self, node):
self.depart_definition_list(node)
def visit_desc_signature(self, node):
self.visit_definition_list_item(node)
self.visit_term(node)
def depart_desc_signature(self, node):
self.depart_term(node)
def visit_desc_signature_line(self, node):
pass
def depart_desc_signature_line(self, node):
self.body.append(' ')
def visit_desc_addname(self, node):
pass
def depart_desc_addname(self, node):
pass
def visit_desc_type(self, node):
pass
def depart_desc_type(self, node):
pass
def visit_desc_returns(self, node):
self.body.append(' -> ')
def depart_desc_returns(self, node):
pass
def visit_desc_name(self, node):
pass
def depart_desc_name(self, node):
pass
def visit_desc_parameterlist(self, node):
self.body.append('(')
self.first_param = 1
def depart_desc_parameterlist(self, node):
self.body.append(')')
def visit_desc_parameter(self, node):
if not self.first_param:
self.body.append(', ')
else:
self.first_param = 0
def depart_desc_parameter(self, node):
pass
def visit_desc_optional(self, node):
self.body.append('[')
def depart_desc_optional(self, node):
self.body.append(']')
def visit_desc_annotation(self, node):
pass
def depart_desc_annotation(self, node):
pass
def visit_desc_content(self, node):
self.visit_definition(node)
def depart_desc_content(self, node):
self.depart_definition(node)
def visit_versionmodified(self, node):
self.visit_paragraph(node)
def depart_versionmodified(self, node):
self.depart_paragraph(node)
# overwritten -- don't make whole of term bold if it includes strong node
def visit_term(self, node):
if node.traverse(nodes.strong):
self.body.append('\n')
else:
BaseTranslator.visit_term(self, node)
def visit_termsep(self, node):
warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. '
'This warning is displayed because some Sphinx extension '
'uses sphinx.addnodes.termsep. Please report it to '
'author of the extension.', RemovedInSphinx16Warning)
self.body.append(', ')
raise nodes.SkipNode
# overwritten -- we don't want source comments to show up
def visit_comment(self, node):
raise nodes.SkipNode
# overwritten -- added ensure_eol()
def visit_footnote(self, node):
self.ensure_eol()
BaseTranslator.visit_footnote(self, node)
# overwritten -- handle footnotes rubric
def visit_rubric(self, node):
self.ensure_eol()
if len(node.children) == 1:
rubtitle = node.children[0].astext()
if rubtitle in ('Footnotes', _('Footnotes')):
self.body.append('.SH ' + self.deunicode(rubtitle).upper() +
'\n')
raise nodes.SkipNode
else:
self.body.append('.sp\n')
def depart_rubric(self, node):
pass
def visit_seealso(self, node):
self.visit_admonition(node, 'seealso')
def depart_seealso(self, node):
self.depart_admonition(node)
def visit_productionlist(self, node):
self.ensure_eol()
names = []
self.in_productionlist += 1
self.body.append('.sp\n.nf\n')
for production in node:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
for production in node:
if production['tokenname']:
lastname = production['tokenname'].ljust(maxlen)
self.body.append(self.defs['strong'][0])
self.body.append(self.deunicode(lastname))
self.body.append(self.defs['strong'][1])
self.body.append(' ::= ')
elif lastname is not None:
self.body.append('%s ' % (' '*len(lastname)))
production.walkabout(self)
self.body.append('\n')
self.body.append('\n.fi\n')
self.in_productionlist -= 1
raise nodes.SkipNode
def visit_production(self, node):
pass
def depart_production(self, node):
pass
# overwritten -- don't emit a warning for images
def visit_image(self, node):
if 'alt' in node.attributes:
self.body.append(_('[image: %s]') % node['alt'] + '\n')
self.body.append(_('[image]') + '\n')
raise nodes.SkipNode
# overwritten -- don't visit inner marked up nodes
def visit_reference(self, node):
self.body.append(self.defs['reference'][0])
# avoid repeating escaping code... fine since
# visit_Text calls astext() and only works on that afterwards
self.visit_Text(node)
self.body.append(self.defs['reference'][1])
uri = node.get('refuri', '')
if uri.startswith('mailto:') or uri.startswith('http:') or \
uri.startswith('https:') or uri.startswith('ftp:'):
# if configured, put the URL after the link
if self.builder.config.man_show_urls and \
node.astext() != uri:
if uri.startswith('mailto:'):
uri = uri[7:]
self.body.extend([
' <',
self.defs['strong'][0], uri, self.defs['strong'][1],
'>'])
raise nodes.SkipNode
def visit_number_reference(self, node):
text = nodes.Text(node.get('title', '#'))
self.visit_Text(text)
raise nodes.SkipNode
def visit_centered(self, node):
self.ensure_eol()
self.body.append('.sp\n.ce\n')
def depart_centered(self, node):
self.body.append('\n.ce 0\n')
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_highlightlang(self, node):
pass
def depart_highlightlang(self, node):
pass
def visit_download_reference(self, node):
pass
def depart_download_reference(self, node):
pass
def visit_toctree(self, node):
raise nodes.SkipNode
def visit_index(self, node):
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
raise nodes.SkipNode
def visit_glossary(self, node):
pass
def depart_glossary(self, node):
pass
def visit_acks(self, node):
self.ensure_eol()
self.body.append(', '.join(n.astext()
for n in node.children[0].children) + '.')
self.body.append('\n')
raise nodes.SkipNode
def visit_hlist(self, node):
self.visit_bullet_list(node)
def depart_hlist(self, node):
self.depart_bullet_list(node)
def visit_hlistcol(self, node):
pass
def depart_hlistcol(self, node):
pass
def visit_literal_emphasis(self, node):
return self.visit_emphasis(node)
def depart_literal_emphasis(self, node):
return self.depart_emphasis(node)
def visit_literal_strong(self, node):
return self.visit_strong(node)
def depart_literal_strong(self, node):
return self.depart_strong(node)
def visit_abbreviation(self, node):
pass
def depart_abbreviation(self, node):
pass
def visit_manpage(self, node):
return self.visit_strong(node)
def depart_manpage(self, node):
return self.depart_strong(node)
# overwritten: handle section titles better than in 0.6 release
def visit_title(self, node):
if isinstance(node.parent, addnodes.seealso):
self.body.append('.IP "')
return
elif isinstance(node.parent, nodes.section):
if self.section_level == 0:
# skip the document title
raise nodes.SkipNode
elif self.section_level == 1:
self.body.append('.SH %s\n' %
self.deunicode(node.astext().upper()))
raise nodes.SkipNode
return BaseTranslator.visit_title(self, node)
def depart_title(self, node):
if isinstance(node.parent, addnodes.seealso):
self.body.append('"\n')
return
return BaseTranslator.depart_title(self, node)
def visit_raw(self, node):
if 'manpage' in node.get('format', '').split():
self.body.append(node.astext())
raise nodes.SkipNode
def visit_meta(self, node):
raise nodes.SkipNode
def visit_inline(self, node):
pass
def depart_inline(self, node):
pass
def visit_math(self, node):
self.builder.warn('using "math" markup without a Sphinx math extension '
'active, please use one of the math extensions '
'described at http://sphinx-doc.org/ext/math.html')
raise nodes.SkipNode
visit_math_block = visit_math
def unknown_visit(self, node):
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
|
bgris/ODL_bgris
|
lib/python3.5/site-packages/Sphinx-1.5.1-py3.5.egg/sphinx/writers/manpage.py
|
Python
|
gpl-3.0
| 13,713
|
[
"VisIt"
] |
8b642afa0434e4ca7785b199177ebbaf6cff9cd4ecb8df788feb0885537d6b1c
|
# -*- coding: utf-8 -*-
'''
Master Reborn Add-on
Copyright (C) 2017 Master Reborn
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import base64, os, sys, urlparse
from resources.lib.modules import control, trakt
inprogress_db = control.setting('inprogress_db')
sysaddon = base64.b64decode('cGx1Z2luOi8vcGx1Z2luLnZpZGVvLm1hc3Rlci5yZWJvcm4v')
syshandle = int(sys.argv[1])
artPath = control.artPath()
addonFanart = control.addonFanart()
imdbCredentials = False if control.setting('imdb.user') == '' else True
traktCredentials = trakt.getTraktCredentialsInfo()
traktIndicators = trakt.getTraktIndicatorsInfo()
queueMenu = control.lang(32065).encode('utf-8')
movielist1 = control.setting('tmdb.movielist_name1')
movielist2 = control.setting('tmdb.movielist_name2')
movielist3 = control.setting('tmdb.movielist_name3')
movielist4 = control.setting('tmdb.movielist_name4')
movielist5 = control.setting('tmdb.movielist_name5')
movielist6 = control.setting('tmdb.movielist_name6')
movielist7 = control.setting('tmdb.movielist_name7')
movielist8 = control.setting('tmdb.movielist_name8')
movielist9 = control.setting('tmdb.movielist_name9')
movielist10 = control.setting('tmdb.movielist_name10')
movielist11 = control.setting('tmdb.movielist_name11')
movielist12 = control.setting('tmdb.movielist_name12')
movielist13 = control.setting('tmdb.movielist_name13')
movielist14 = control.setting('tmdb.movielist_name14')
movielist15 = control.setting('tmdb.movielist_name15')
movielist16 = control.setting('tmdb.movielist_name16')
movielist17 = control.setting('tmdb.movielist_name17')
movielist18 = control.setting('tmdb.movielist_name18')
movielist19 = control.setting('tmdb.movielist_name19')
movielist20 = control.setting('tmdb.movielist_name20')
movielist21 = control.setting('tmdb.movielist_name21')
movielist22 = control.setting('tmdb.movielist_name22')
movielist23 = control.setting('tmdb.movielist_name23')
movielist24 = control.setting('tmdb.movielist_name24')
movielist25 = control.setting('tmdb.movielist_name25')
movielist26 = control.setting('tmdb.movielist_name26')
movielist27 = control.setting('tmdb.movielist_name27')
movielist28 = control.setting('tmdb.movielist_name28')
movielist29 = control.setting('tmdb.movielist_name29')
movielist30 = control.setting('tmdb.movielist_name30')
tvlist1 = control.setting('tmdb.tvlist_name1')
tvlist2 = control.setting('tmdb.tvlist_name2')
tvlist3 = control.setting('tmdb.tvlist_name3')
tvlist4 = control.setting('tmdb.tvlist_name4')
tvlist5 = control.setting('tmdb.tvlist_name5')
tvlist6 = control.setting('tmdb.tvlist_name6')
tvlist7 = control.setting('tmdb.tvlist_name7')
tvlist8 = control.setting('tmdb.tvlist_name8')
tvlist9 = control.setting('tmdb.tvlist_name9')
tvlist10 = control.setting('tmdb.tvlist_name10')
tvlist11 = control.setting('tmdb.tvlist_name11')
tvlist12 = control.setting('tmdb.tvlist_name12')
tvlist13 = control.setting('tmdb.tvlist_name13')
tvlist14 = control.setting('tmdb.tvlist_name14')
tvlist15 = control.setting('tmdb.tvlist_name15')
tvlist16 = control.setting('tmdb.tvlist_name16')
tvlist17 = control.setting('tmdb.tvlist_name17')
tvlist18 = control.setting('tmdb.tvlist_name18')
tvlist19 = control.setting('tmdb.tvlist_name19')
tvlist20 = control.setting('tmdb.tvlist_name20')
tvlist21 = control.setting('tmdb.tvlist_name21')
tvlist22 = control.setting('tmdb.tvlist_name22')
tvlist23 = control.setting('tmdb.tvlist_name23')
tvlist24 = control.setting('tmdb.tvlist_name24')
tvlist25 = control.setting('tmdb.tvlist_name25')
tvlist26 = control.setting('tmdb.tvlist_name26')
tvlist27 = control.setting('tmdb.tvlist_name27')
tvlist28 = control.setting('tmdb.tvlist_name28')
tvlist29 = control.setting('tmdb.tvlist_name29')
tvlist30 = control.setting('tmdb.tvlist_name30')
class navigator:
def root(self):
self.addDirectoryItem('Merry Christmas!', 'movies&url=tmdbxmas', 'xmas.png', 'DefaultMovies.png')
self.addDirectoryItem(32001, 'movieNavigator', 'movies.png', 'DefaultMovies.png')
self.addDirectoryItem(32002, 'tvNavigator', 'channels.png', 'DefaultTVShows.png')
if not control.setting('movie.widget') == '0':
self.addDirectoryItem('Spotlight', 'movieWidget', 'latest-movies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('New Movies', 'movies&url=premiere', 'trending.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(32026, 'tvshows&url=premiere', 'years.png', 'DefaultTVShows.png')
self.addDirectoryItem('My Lists', 'lists_navigator', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
if not control.setting('lists.widget') == '0':
self.addDirectoryItem('Trakt Movies', 'soullessNavigator', 'mymovies.png', 'DefaultVideoPlaylists.png')
self.addDirectoryItem('Trakt TV', 'tvshowstNavigator', 'mytvshows.png', 'DefaultVideoPlaylists.png')
self.addDirectoryItem(32027, 'calendars', 'networks.png', 'DefaultRecentlyAddedEpisodes.png')
self.addDirectoryItem(32007, 'channels', 'channels.png', 'DefaultMovies.png')
self.addDirectoryItem(32008, 'toolNavigator', 'tools.png', 'DefaultAddonProgram.png')
downloads = True if control.setting('downloads') == 'true' and (len(control.listDir(control.setting('movie.download.path'))[0]) > 0) else False
if downloads == True:
self.addDirectoryItem(32009, 'downloadNavigator', 'downloads.png', 'DefaultFolder.png')
self.addDirectoryItem(32010, 'searchNavigator', 'search.png', 'DefaultFolder.png')
self.addDirectoryItem('Changelog', 'ShowChangelog', 'icon.png', 'DefaultFolder.png')
self.endDirectory()
def movies(self, lite=False):
if inprogress_db == 'true':
self.addDirectoryItem("In Progress", 'movieProgress', 'trending.png', 'DefaultMovies.png')
self.addDirectoryItem('Featured', 'movies&url=featured', 'featured.png', 'DefaultRecentlyAddedMovies.png')
# self.addDirectoryItem('Trending', 'movies&url=trending', 'trending.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Popular', 'movies&url=popular', 'populars.png', 'DefaultMovies.png')
self.addDirectoryItem('New Movies', 'movies&url=premiere', 'trending.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Top Rated', 'movies&url=views', 'most-viewed.png', 'DefaultMovies.png')
self.addDirectoryItem('In Theaters', 'movies&url=theaters', 'in-theaters.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Marvel Universe', 'movies&url=tmdbmarvel', 'marvel.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Oscar Winners', 'movies&url=tmdboscars', 'oscars.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Disney Collection', 'movies&url=tmdbdisney', 'disney.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Collections', 'collectionsMovies', 'collection.png', 'DefaultMovies.png')
self.addDirectoryItem('Kids Collections', 'kidsCollections', 'kidscollection.png', 'DefaultMovies.png')
self.addDirectoryItem('Holiday', 'holidayCollections', 'holidaycollections.png', 'DefaultMovies.png')
self.addDirectoryItem('Genres', 'movieGenres', 'genres.png', 'DefaultMovies.png')
self.addDirectoryItem('Years', 'movieYears', 'years.png', 'DefaultMovies.png')
self.addDirectoryItem('Persons', 'moviePersons', 'people.png', 'DefaultMovies.png')
self.addDirectoryItem('Certificates', 'movieCertificates', 'certificates.png', 'DefaultMovies.png')
self.addDirectoryItem(32028, 'moviePerson', 'people-search.png', 'DefaultMovies.png')
self.addDirectoryItem(32010, 'movieSearch', 'search.png', 'DefaultMovies.png')
self.endDirectory()
def soulless(self, lite=False):
self.accountCheck()
asrch = "{0} - {1}".format(control.lang2(20337).encode('utf-8'), control.lang2(137).encode('utf-8'))
if traktCredentials == True and imdbCredentials == True:
self.addDirectoryItem(32032, 'movies&url=traktcollection', 'trakt.png', 'DefaultMovies.png')
self.addDirectoryItem(32033, 'movies&url=traktwatchlist', 'trakt.png', 'DefaultMovies.png')
elif traktCredentials == True:
self.addDirectoryItem(32032, 'movies&url=traktcollection', 'trakt.png', 'DefaultMovies.png')
self.addDirectoryItem(32033, 'movies&url=traktwatchlist', 'trakt.png', 'DefaultMovies.png')
if traktCredentials == True:
self.addDirectoryItem(32035, 'movies&url=traktfeatured', 'trakt.png', 'DefaultMovies.png', queue=True)
# if traktIndicators == True:
# self.addDirectoryItem(32036, 'movies&url=trakthistory', 'trakt.png', 'DefaultMovies.png', queue=True)
self.addDirectoryItem("My Lists", 'movieUserlists', 'mymovies.png', 'DefaultMovies.png')
if lite == False:
self.addDirectoryItem(32031, 'movieliteNavigator', 'movies.png', 'DefaultMovies.png')
self.addDirectoryItem(asrch, 'moviePerson', 'actorsearch.png', 'DefaultMovies.png')
self.addDirectoryItem(32010, 'movieSearch', 'search.png', 'DefaultMovies.png')
self.endDirectory()
def lists_navigator(self):
self.addDirectoryItem('[WATCHLIST] Movies', 'movieFavourites', 'mymovies.png', 'DefaultMovies.png')
self.addDirectoryItem('[WATCHLIST] TV Shows', 'tvFavourites', 'mymovies.png', 'DefaultMovies.png')
self.addDirectoryItem('[TMDB LIST] Movies', 'movielist', 'movies.png', 'DefaultMovies.png')
self.addDirectoryItem('[TMDB LIST] TV Shows', 'tvlist', 'channels.png', 'DefaultTVShows.png')
self.endDirectory()
def mymovies(self):
self.addDirectoryItem(movielist1, 'movies&url=mycustomlist1', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist2, 'movies&url=mycustomlist2', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist3, 'movies&url=mycustomlist3', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist4, 'movies&url=mycustomlist4', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist5, 'movies&url=mycustomlist5', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist6, 'movies&url=mycustomlist6', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist7, 'movies&url=mycustomlist7', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist8, 'movies&url=mycustomlist8', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist9, 'movies&url=mycustomlist9', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist10, 'movies&url=mycustomlist10', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist11, 'movies&url=mycustomlist11', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist12, 'movies&url=mycustomlist12', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist13, 'movies&url=mycustomlist13', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist14, 'movies&url=mycustomlist14', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist15, 'movies&url=mycustomlist15', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist16, 'movies&url=mycustomlist16', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist17, 'movies&url=mycustomlist17', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist18, 'movies&url=mycustomlist18', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist19, 'movies&url=mycustomlist19', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist20, 'movies&url=mycustomlist20', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist21, 'movies&url=mycustomlist21', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist22, 'movies&url=mycustomlist22', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist23, 'movies&url=mycustomlist23', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist24, 'movies&url=mycustomlist24', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist25, 'movies&url=mycustomlist25', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist26, 'movies&url=mycustomlist26', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist27, 'movies&url=mycustomlist27', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist28, 'movies&url=mycustomlist28', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist29, 'movies&url=mycustomlist29', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(movielist30, 'movies&url=mycustomlist30', 'mymovies.png', 'DefaultRecentlyAddedMovies.png')
self.endDirectory()
def mytv(self):
self.addDirectoryItem(tvlist1, 'tvshows&url=mycustomlist1', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist2, 'tvshows&url=mycustomlist2', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist3, 'tvshows&url=mycustomlist3', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist4, 'tvshows&url=mycustomlist4', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist5, 'tvshows&url=mycustomlist5', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist6, 'tvshows&url=mycustomlist6', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist7, 'tvshows&url=mycustomlist7', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist8, 'tvshows&url=mycustomlist8', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist9, 'tvshows&url=mycustomlist9', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist10, 'tvshows&url=mycustomlist10', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist11, 'tvshows&url=mycustomlist11', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist12, 'tvshows&url=mycustomlist12', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist13, 'tvshows&url=mycustomlist13', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist14, 'tvshows&url=mycustomlist14', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist15, 'tvshows&url=mycustomlist15', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist16, 'tvshows&url=mycustomlist16', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist17, 'tvshows&url=mycustomlist17', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist18, 'tvshows&url=mycustomlist18', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist19, 'tvshows&url=mycustomlist19', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist20, 'tvshows&url=mycustomlist20', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist21, 'tvshows&url=mycustomlist21', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist22, 'tvshows&url=mycustomlist22', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist23, 'tvshows&url=mycustomlist23', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist24, 'tvshows&url=mycustomlist24', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist25, 'tvshows&url=mycustomlist25', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist26, 'tvshows&url=mycustomlist26', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist27, 'tvshows&url=mycustomlist27', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist28, 'tvshows&url=mycustomlist28', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist29, 'tvshows&url=mycustomlist29', 'channels.png', 'DefaultTVShows.png')
self.addDirectoryItem(tvlist30, 'tvshows&url=mycustomlist30', 'channels.png', 'DefaultTVShows.png')
self.endDirectory()
def tvshowst(self, lite=False):
self.accountCheck()
if traktCredentials == True and imdbCredentials == True:
self.addDirectoryItem(32032, 'tvshows&url=traktcollection', 'trakt.png', 'DefaultTVShows.png')
self.addDirectoryItem(32033, 'tvshows&url=traktwatchlist', 'trakt.png', 'DefaultTVShows.png')
elif traktCredentials == True:
self.addDirectoryItem(32032, 'tvshows&url=traktcollection', 'trakt.png', 'DefaultTVShows.png')
self.addDirectoryItem(32033, 'tvshows&url=traktwatchlist', 'trakt.png', 'DefaultTVShows.png')
# if traktCredentials == True:
# self.addDirectoryItem('Featured', 'tvshows&url=traktfeatured', 'trakt.png', 'DefaultTVShows.png')
# if traktIndicators == True:
# self.addDirectoryItem('History', 'calendar&url=trakthistory', 'trakt.png', 'DefaultTVShows.png', queue=True)
# self.addDirectoryItem('Progress', 'calendar&url=progress', 'trakt.png', 'DefaultRecentlyAddedEpisodes.png', queue=True)
# self.addDirectoryItem('Calendar', 'calendar&url=mycalendar', 'trakt.png', 'DefaultRecentlyAddedEpisodes.png', queue=True)
self.addDirectoryItem('My Lists', 'tvUserlists', 'mytvshows.png', 'DefaultTVShows.png')
# if traktCredentials == True:
# self.addDirectoryItem('My Episodes', 'episodeUserlists', 'mytvshows.png', 'DefaultTVShows.png')
if lite == False:
self.addDirectoryItem('TV Shows', 'tvliteNavigator', 'tvshows.png', 'DefaultTVShows.png')
self.addDirectoryItem('Actor Search', 'tvPerson', 'actorsearch.png', 'DefaultTVShows.png')
self.addDirectoryItem('Search', 'tvSearch', 'search.png', 'DefaultTVShows.png')
self.endDirectory()
def tvshows(self, lite=False):
if inprogress_db == 'true': self.addDirectoryItem("In Progress", 'showsProgress', 'trending.png', 'DefaultMovies.png')
self.addDirectoryItem('Featured', 'tvshows&url=featured', 'populars.png', 'DefaultRecentlyAddedEpisodes.png')
self.addDirectoryItem('Popular', 'tvshows&url=popular', 'most-viewed.png', 'DefaultTVShows.png')
self.addDirectoryItem(32019, 'tvshows&url=views', 'most-viewed.png', 'DefaultTVShows.png')
self.addDirectoryItem(32026, 'tvshows&url=premiere', 'years.png', 'DefaultTVShows.png')
self.addDirectoryItem(32025, 'tvshows&url=active', 'years.png', 'DefaultTVShows.png')
self.addDirectoryItem('Kids TV', 'kidstvCollections', 'kidscollection.png', 'DefaultMovies.png')
self.addDirectoryItem('TV Collections', 'tvCollections', 'collection.png', 'DefaultMovies.png')
self.addDirectoryItem(32023, 'tvshows&url=rating', 'featured.png', 'DefaultTVShows.png')
self.addDirectoryItem(32011, 'tvGenres', 'genres.png', 'DefaultTVShows.png')
self.addDirectoryItem(32016, 'tvNetworks', 'networks.png', 'DefaultTVShows.png')
self.addDirectoryItem(32024, 'tvshows&url=airing', 'airing-today.png', 'DefaultTVShows.png')
self.addDirectoryItem(32027, 'calendars', 'networks.png', 'DefaultRecentlyAddedEpisodes.png')
self.addDirectoryItem(32010, 'tvSearch', 'search.png', 'DefaultTVShows.png')
self.endDirectory()
def tools(self):
self.addDirectoryItem('[B]URL RESOLVER[/B]: Settings', 'urlresolversettings&query=0.0', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem(32043, 'openSettings&query=0.1', 'tools.png', 'DefaultAddonProgram.png')
# self.addDirectoryItem(32044, 'openSettings&query=3.1', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem(32045, 'openSettings&query=1.0', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]SETTINGS[/B]: Accounts', 'openSettings&query=2.1', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]SETTINGS[/B]: Providers', 'nanscrapersettings&query=1.1', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]SETTINGS[/B]: Debrid', 'openSettings&query=2.8', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]SETTINGS[/B]: Downloads', 'openSettings&query=3.0', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]SETTINGS[/B]: Subtitles', 'openSettings&query=4.0', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]SETTINGS[/B]: Watchlist', 'openSettings&query=5.0', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]SETTINGS[/B]: Movie Lists', 'openSettings&query=6.1', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]SETTINGS[/B]: TVShow Lists', 'openSettings&query=7.1', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]Master Reborn[/B]: Views', 'viewsNavigator', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]Master Reborn[/B]: Clear Providers', 'clearSources', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]Master Reborn[/B]: Clear Cache', 'clearCache', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]BACKUP[/B]: Watchlist', 'backupwatchlist', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]RESTORE[/B]: Watchlist', 'restorewatchlist', 'tools.png', 'DefaultAddonProgram.png')
self.addDirectoryItem('[B]Master Reborn[/B]: Clear Progress Database', 'clearProgress', 'tools.png', 'DefaultAddonProgram.png')
self.endDirectory()
def downloads(self):
movie_downloads = control.setting('movie.download.path')
# tv_downloads = control.setting('tv.download.path')
if len(control.listDir(movie_downloads)[0]) > 0:
self.addDirectoryItem(32001, movie_downloads, 'movies.png', 'DefaultMovies.png', isAction=False)
self.endDirectory()
def search(self):
self.addDirectoryItem(32001, 'movieSearch', 'search.png', 'DefaultMovies.png')
self.addDirectoryItem(32002, 'tvSearch', 'search.png', 'DefaultTVShows.png')
self.addDirectoryItem(32029, 'moviePerson', 'people-search.png', 'DefaultMovies.png')
# self.addDirectoryItem(32030, 'tvPerson', 'people-search.png', 'DefaultTVShows.png')
self.endDirectory()
def views(self):
try:
control.idle()
items = [ (control.lang(32001).encode('utf-8'), 'movies'), (control.lang(32002).encode('utf-8'), 'tvshows'), (control.lang(32054).encode('utf-8'), 'seasons'), (control.lang(32038).encode('utf-8'), 'episodes') ]
select = control.selectDialog([i[0] for i in items], control.lang(32049).encode('utf-8'))
if select == -1: return
content = items[select][1]
title = control.lang(32059).encode('utf-8')
url = '%s?action=addView&content=%s' % (sys.argv[0], content)
poster, banner, fanart = control.addonPoster(), control.addonBanner(), control.addonFanart()
item = control.item(label=title)
item.setInfo(type='Video', infoLabels = {'title': title})
item.setArt({'icon': poster, 'thumb': poster, 'poster': poster, 'tvshow.poster': poster, 'season.poster': poster, 'banner': banner, 'tvshow.banner': banner, 'season.banner': banner})
item.setProperty('Fanart_Image', fanart)
control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=False)
control.content(int(sys.argv[1]), content)
control.directory(int(sys.argv[1]), cacheToDisc=True)
            from resources.lib.modules import views
            views.setView(content, {})
except:
return
def accountCheck(self):
if traktCredentials == False:
control.idle()
control.infoDialog(control.lang(32042).encode('utf-8'), sound=True, icon='WARNING')
control.openSettings('2.12')
sys.exit()
def clearCache(self):
control.idle()
yes = control.yesnoDialog(control.lang(32056).encode('utf-8'), '', '')
if not yes: return
from resources.lib.modules import cache
cache.clear()
control.infoDialog(control.lang(32057).encode('utf-8'), sound=True, icon='INFO')
def addDirectoryItem(self, name, query, thumb, icon, queue=False, isAction=True, isFolder=True):
try: name = control.lang(name).encode('utf-8')
except: pass
url = '%s?action=%s' % (sysaddon, query) if isAction == True else query
thumb = os.path.join(artPath, thumb) if not artPath == None else icon
cm = []
if queue == True: cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
item = control.item(label=name)
item.addContextMenuItems(cm)
item.setArt({'icon': thumb, 'thumb': thumb})
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=isFolder)
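    # Illustrative sketch (hypothetical values, not from the original add-on):
    # addDirectoryItem('Popular', 'movies&url=popular', 'populars.png',
    # 'DefaultMovies.png') builds a url of roughly
    # '<decoded sysaddon>?action=movies&url=popular' and adds it to the Kodi
    # listing as a folder item, using populars.png from artPath as the thumb
    # (falling back to the icon when artPath is not set).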
def collectionsMovies(self):
self.addDirectoryItem('Animal Kingdom', 'movies&url=tmdbanimal', 'animalkingdom.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Based On A True Story', 'movies&url=tmdbbased', 'truestories.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Cold War', 'movies&url=tmdbcold', 'cold.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Datenight', 'movies&url=tmdbdatenight', 'datenight.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('DC Universe', 'movies&url=tmdbdc', 'dc.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Dont Do Drugs', 'movies&url=tmdb420', '420.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Fight Club', 'movies&url=tmdbfight', 'fight.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Gamers Paradise', 'movies&url=tmdbgamers', 'gamersparadise.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Gangster Hits', 'movies&url=tmdbmafia', 'mob.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Girls With Guns', 'movies&url=tmdbgwg', 'gwg.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Hack The Planet', 'movies&url=tmdbhack', 'hack.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('The Heist', 'movies&url=tmdbheist', 'heists.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Immortal', 'movies&url=tmdbimmortal', 'immortal.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Its A Conspiracy', 'movies&url=tmdbconsp', 'consp.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Life in The Fast Lane', 'movies&url=tmdbfast', 'fast.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Marvel Universe', 'movies&url=tmdbmarvel', 'marvel.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Off The Shelf', 'movies&url=tmdbbooks', 'offtheshelf.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Out Of This World', 'movies&url=tmdbufo', 'ufo.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Snatched', 'movies&url=tmdbsnatched', 'snatched.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Stand-Up', 'movies&url=tmdbstandup', 'standup.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Streets of The Chi', 'movies&url=tmdbchi', 'chi.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Sports', 'movies&url=tmdbsports', 'sports.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('The Art Of The Con', 'movies&url=tmdbconman', 'conman.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('The Spy Who Streamed Me', 'movies&url=tmdbspy', 'spy.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Vigilante Justice', 'movies&url=tmdbvigilante', 'vigilante.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Welcome To The Hood', 'movies&url=tmdburban', 'hood.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('What A Tragedy', 'movies&url=tmdbtragedy', 'tragedy.png', 'DefaultRecentlyAddedMovies.png')
self.endDirectory()
def kidsCollections(self):
self.addDirectoryItem('Disney', 'movies&url=tmdbdisney', 'disney.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Dreamworks', 'movies&url=tmdbdreamworks', 'dreamworks.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Lego Collection', 'movies&url=tmdblego', 'lego.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Princesses', 'movies&url=tmdbprincess', 'princess.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Super Heroes', 'movies&url=tmdbhero', 'hero.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('The Ultimate Kids Collection', 'movies&url=tmdbkidz', 'kidsfav.png', 'DefaultRecentlyAddedMovies.png')
self.endDirectory()
def holidayCollections(self):
self.addDirectoryItem('Christmas', 'movies&url=tmdbchristmas', 'christmas.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Easter', 'movies&url=tmdbeaster', 'easter.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Halloween', 'movies&url=tmdbhalloween', 'halloween.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Independence Day', 'movies&url=tmdbfourth', 'fourth.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Thanksgiving', 'movies&url=tmdbthanks', 'thanks.png', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem('Valentines', 'movies&url=tmdbvalentines', 'valentines.png', 'DefaultRecentlyAddedMovies.png')
self.endDirectory()
def tvCollections(self):
self.addDirectoryItem('Blast From The Past', 'tvshows&url=tmdbblast', 'blast.png', 'DefaultTVShows.png')
self.addDirectoryItem('Blaze It TV', 'tvshows&url=tmdb420tv', '420.png', 'DefaultTVShows.png')
self.addDirectoryItem('Datenight', 'tvshows&url=tmdbdatenighttv', 'datenight.png', 'DefaultTVShows.png')
self.addDirectoryItem('Gamers Paradise', 'tvshows&url=tmdbgamers', 'gamersparadise.png', 'DefaultTVShows.png')
self.addDirectoryItem('Hungry Yet', 'tvshows&url=tmdbcooking', 'hungry.png', 'DefaultTVShows.png')
self.addDirectoryItem('Inked N Proud', 'tvshows&url=tmdbtats', 'inked.png', 'DefaultTVShows.png')
self.addDirectoryItem('LMAO', 'tvshows&url=tmdblmao', 'lmao.PNG', 'DefaultTVShows.png')
self.addDirectoryItem('Life in The Fast Lane', 'tvshows&url=tmdbfasttv', 'fast.png', 'DefaultTVShows.png')
self.addDirectoryItem('Out Of This World', 'tvshows&url=tmdbufotv', 'ufo.png', 'DefaultTVShows.png')
self.addDirectoryItem('Streets of The Chi', 'tvshows&url=tmdbchitv', 'chi.png', 'DefaultTVShows.png')
self.addDirectoryItem('Sports', 'tvshows&url=tmdbsportstv', 'sports.png', 'DefaultTVShows.png')
self.endDirectory()
def kidstvCollections(self):
self.addDirectoryItem('Animation Station', 'tvshows&url=tmdbanimationtv', 'lego.png', 'DefaultTVShows.png')
self.addDirectoryItem('Back In The Day Cartoons', 'tvshows&url=tmdbcartoon', 'cartoon.png', 'DefaultTVShows.png')
self.addDirectoryItem('Kids TV', 'tvshows&url=tmdbkids', 'kidsfav.png', 'DefaultTVShows.png')
self.addDirectoryItem('Lil Ones', 'tvshows&url=tmdblittle', 'lil.png', 'DefaultTVShows.png')
self.endDirectory()
def endDirectory(self):
# control.do_block_check(False)
control.directory(syshandle, cacheToDisc=True)
|
TheWardoctor/Wardoctors-repo
|
plugin.video.master.reborn/resources/lib/indexers/navigator.py
|
Python
|
apache-2.0
| 36,796
|
[
"BLAST"
] |
e6de981a75f03f186eabf540a4642f9a3e2cd89a9d541697c68537cdb140959b
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Multivariate Normal distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
def _assert_compatible_shapes(mu, sigma):
r_mu = array_ops.rank(mu)
r_sigma = array_ops.rank(sigma)
sigma_shape = array_ops.shape(sigma)
sigma_rank = array_ops.rank(sigma)
mu_shape = array_ops.shape(mu)
return control_flow_ops.group(
logging_ops.Assert(
math_ops.equal(r_mu + 1, r_sigma),
["Rank of mu should be one less than rank of sigma, but saw: ",
r_mu, " vs. ", r_sigma]),
logging_ops.Assert(
math_ops.equal(
array_ops.gather(sigma_shape, sigma_rank - 2),
array_ops.gather(sigma_shape, sigma_rank - 1)),
["Last two dimensions of sigma (%s) must be equal: " % sigma.name,
sigma_shape]),
logging_ops.Assert(
math_ops.reduce_all(math_ops.equal(
mu_shape,
array_ops.slice(
sigma_shape, [0], array_ops.pack([sigma_rank - 1])))),
["mu.shape and sigma.shape[:-1] must match, but saw: ",
mu_shape, " vs. ", sigma_shape]))
def _assert_batch_positive_definite(sigma_chol):
"""Add assertions checking that the sigmas are all Positive Definite.
Given `sigma_chol == cholesky(sigma)`, it is sufficient to check that
`all(diag(sigma_chol) > 0)`. This is because to check that a matrix is PD,
it is sufficient that its cholesky factorization is PD, and to check that a
triangular matrix is PD, it is sufficient to check that its diagonal
entries are positive.
Args:
sigma_chol: N-D. The lower triangular cholesky decomposition of `sigma`.
Returns:
An assertion op to use with `control_dependencies`, verifying that
`sigma_chol` is positive definite.
"""
sigma_batch_diag = array_ops.batch_matrix_diag_part(sigma_chol)
return logging_ops.Assert(
math_ops.reduce_all(sigma_batch_diag > 0),
["sigma_chol is not positive definite. batched diagonals: ",
sigma_batch_diag, " shaped: ", array_ops.shape(sigma_batch_diag)])
def _determinant_from_sigma_chol(sigma_chol):
det_last_dim = array_ops.rank(sigma_chol) - 2
sigma_batch_diag = array_ops.batch_matrix_diag_part(sigma_chol)
det = math_ops.square(math_ops.reduce_prod(
sigma_batch_diag, reduction_indices=det_last_dim))
det.set_shape(sigma_chol.get_shape()[:-2])
return det
class MultivariateNormal(object):
"""The Multivariate Normal distribution on `R^k`.
The distribution has mean and covariance parameters mu (1-D), sigma (2-D),
or alternatively mean `mu` and factored covariance (cholesky decomposed
`sigma`) called `sigma_chol`.
#### Mathematical details
The PDF of this distribution is:
```
f(x) = (2*pi)^(-k/2) |det(sigma)|^(-1/2) exp(-1/2*(x-mu)^*.sigma^{-1}.(x-mu))
```
where `.` denotes the inner product on `R^k` and `^*` denotes transpose.
Alternatively, if `sigma` is positive definite, it can be represented in terms
of its lower triangular cholesky factorization
```sigma = sigma_chol . sigma_chol^*```
and the pdf above allows simpler computation:
```
|det(sigma)| = reduce_prod(diag(sigma_chol))^2
x_whitened = sigma^{-1/2} . (x - mu) = tri_solve(sigma_chol, x - mu)
(x-mu)^* .sigma^{-1} . (x-mu) = x_whitened^* . x_whitened
```
where `tri_solve()` solves a triangular system of equations.
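  As a rough sketch (plain NumPy, not part of this class; names are purely
  illustrative), the log-pdf of a single observation can be computed from the
  cholesky factor exactly as outlined above:
  ```python
  import numpy as np

  def log_pdf_sketch(x, mu, sigma):
    # x: (k,) observation, mu: (k,) mean, sigma: (k, k) covariance (numpy arrays)
    k = len(mu)
    sigma_chol = np.linalg.cholesky(sigma)              # lower triangular
    log_det = 2. * np.sum(np.log(np.diag(sigma_chol)))  # log |det(sigma)|
    x_whitened = np.linalg.solve(sigma_chol, x - mu)    # tri_solve(sigma_chol, x - mu)
    quad = np.dot(x_whitened, x_whitened)               # (x-mu)^* . sigma^{-1} . (x-mu)
    return -0.5 * (k * np.log(2. * np.pi) + log_det + quad)
  ```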
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
mu = [1, 2, 3]
sigma = [[1, 0, 0], [0, 3, 0], [0, 0, 2]]
dist = tf.contrib.distributions.MultivariateNormal(mu=mu, sigma=sigma)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]]
sigma = ... # shape 2 x 3 x 3
dist = tf.contrib.distributions.MultivariateNormal(mu=mu, sigma=sigma)
  # Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self, mu, sigma=None, sigma_chol=None, name=None):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu`, which are tensors of rank `N+1` (`N >= 0`)
with the last dimension having length `k`.
User must provide exactly one of `sigma` (the covariance matrices) or
`sigma_chol` (the cholesky decompositions of the covariance matrices).
`sigma` or `sigma_chol` must be of rank `N+2`. The last two dimensions
must both have length `k`. The first `N` dimensions correspond to batch
indices.
If `sigma_chol` is not provided, the batch cholesky factorization of `sigma`
is calculated for you.
The shapes of `mu` and `sigma` must match for the first `N` dimensions.
Regardless of which parameter is provided, the covariance matrices must all
be **positive definite** (an error is raised if one of them is not).
Args:
mu: (N+1)-D. `float` or `double` tensor, the means of the distributions.
sigma: (N+2)-D. (optional) `float` or `double` tensor, the covariances
of the distribution(s). The first `N+1` dimensions must match
those of `mu`. Must be batch-positive-definite.
sigma_chol: (N+2)-D. (optional) `float` or `double` tensor, a
lower-triangular factorization of `sigma`
(`sigma = sigma_chol . sigma_chol^*`). The first `N+1` dimensions
must match those of `mu`. The tensor itself need not be batch
lower triangular: we ignore the upper triangular part. However,
the batch diagonals must be positive (i.e., sigma_chol must be
batch-positive-definite).
name: The name to give Ops created by the initializer.
Raises:
ValueError: if neither sigma nor sigma_chol is provided.
TypeError: if mu and sigma (resp. sigma_chol) are different dtypes.
"""
if (sigma is None) == (sigma_chol is None):
raise ValueError("Exactly one of sigma and sigma_chol must be provided")
with ops.op_scope([mu, sigma, sigma_chol], name, "MultivariateNormal"):
sigma_or_half = sigma_chol if sigma is None else sigma
mu = ops.convert_to_tensor(mu)
sigma_or_half = ops.convert_to_tensor(sigma_or_half)
contrib_tensor_util.assert_same_float_dtype((mu, sigma_or_half))
with ops.control_dependencies([
_assert_compatible_shapes(mu, sigma_or_half)]):
mu = array_ops.identity(mu, name="mu")
# Store the dimensionality of the MVNs
self._k = array_ops.gather(array_ops.shape(mu), array_ops.rank(mu) - 1)
if sigma_chol is not None:
# Ensure we only keep the lower triangular part.
sigma_chol = array_ops.batch_matrix_band_part(
sigma_chol, num_lower=-1, num_upper=0)
sigma_det = _determinant_from_sigma_chol(sigma_chol)
with ops.control_dependencies([
_assert_batch_positive_definite(sigma_chol)]):
self._sigma = math_ops.batch_matmul(
sigma_chol, sigma_chol, adj_y=True, name="sigma")
self._sigma_chol = array_ops.identity(sigma_chol, "sigma_chol")
self._sigma_det = array_ops.identity(sigma_det, "sigma_det")
self._mu = array_ops.identity(mu, "mu")
else: # sigma is not None
sigma_chol = linalg_ops.batch_cholesky(sigma)
sigma_det = _determinant_from_sigma_chol(sigma_chol)
# batch_cholesky checks for PSD; so we can just use it here.
with ops.control_dependencies([sigma_chol]):
self._sigma = array_ops.identity(sigma, "sigma")
self._sigma_chol = array_ops.identity(sigma_chol, "sigma_chol")
self._sigma_det = array_ops.identity(sigma_det, "sigma_det")
self._mu = array_ops.identity(mu, "mu")
@property
def dtype(self):
return self._mu.dtype
@property
def mu(self):
return self._mu
@property
def sigma(self):
return self._sigma
@property
def mean(self):
return self._mu
@property
def sigma_det(self):
return self._sigma_det
def log_pdf(self, x, name=None):
"""Log pdf of observations `x` given these Multivariate Normals.
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu`.
name: The name to give this op.
Returns:
log_pdf: tensor of dtype `dtype`, the log-PDFs of `x`.
"""
with ops.op_scope(
[self._mu, self._sigma_chol, x], name, "MultivariateNormalLogPdf"):
x = ops.convert_to_tensor(x)
contrib_tensor_util.assert_same_float_dtype((self._mu, x))
x_centered = x - self.mu
x_rank = array_ops.rank(x_centered)
sigma_rank = array_ops.rank(self._sigma_chol)
x_rank_vec = array_ops.pack([x_rank])
sigma_rank_vec = array_ops.pack([sigma_rank])
x_shape = array_ops.shape(x_centered)
# sigma_chol is shaped [D, E, F, ..., k, k]
# x_centered shape is one of:
# [D, E, F, ..., k], or [F, ..., k], or
# [A, B, C, D, E, F, ..., k]
# and we need to convert x_centered to shape:
# [D, E, F, ..., k, A*B*C] (or 1 if A, B, C don't exist)
# then transpose and reshape x_whitened back to one of the shapes:
# [D, E, F, ..., k], or [1, 1, F, ..., k], or
# [A, B, C, D, E, F, ..., k]
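      # Illustrative (hypothetical) shape example for the description above:
      # with sigma_chol shaped [2, 3, k, k] and x_centered shaped [5, 2, 3, k],
      # the helpers below first rearrange x_centered to [2, 3, k, 5] for the
      # batched triangular solve, then restore the [5, 2, 3, k] layout.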
# This helper handles the case where rank(x_centered) < rank(sigma)
def _broadcast_x_not_higher_rank_than_sigma():
return array_ops.reshape(
x_centered,
array_ops.concat(
# Reshape to ones(deficient x rank) + x_shape + [1]
0, (array_ops.ones(array_ops.pack([sigma_rank - x_rank - 1]),
dtype=x_rank.dtype),
x_shape,
[1])))
# These helpers handle the case where rank(x_centered) >= rank(sigma)
def _broadcast_x_higher_rank_than_sigma():
x_shape_left = array_ops.slice(
x_shape, [0], sigma_rank_vec - 1)
x_shape_right = array_ops.slice(
x_shape, sigma_rank_vec - 1, x_rank_vec - 1)
x_shape_perm = array_ops.concat(
0, (math_ops.range(sigma_rank - 1, x_rank),
math_ops.range(0, sigma_rank - 1)))
return array_ops.reshape(
# Convert to [D, E, F, ..., k, B, C]
array_ops.transpose(
x_centered, perm=x_shape_perm),
# Reshape to [D, E, F, ..., k, B*C]
array_ops.concat(
0, (x_shape_right,
array_ops.pack([
math_ops.reduce_prod(x_shape_left, 0)]))))
def _unbroadcast_x_higher_rank_than_sigma():
x_shape_left = array_ops.slice(
x_shape, [0], sigma_rank_vec - 1)
x_shape_right = array_ops.slice(
x_shape, sigma_rank_vec - 1, x_rank_vec - 1)
x_shape_perm = array_ops.concat(
0, (math_ops.range(sigma_rank - 1, x_rank),
math_ops.range(0, sigma_rank - 1)))
return array_ops.transpose(
# [D, E, F, ..., k, B, C] => [B, C, D, E, F, ..., k]
array_ops.reshape(
# convert to [D, E, F, ..., k, B, C]
x_whitened_broadcast,
array_ops.concat(0, (x_shape_right, x_shape_left))),
perm=x_shape_perm)
# Step 1: reshape x_centered
x_centered_broadcast = control_flow_ops.cond(
# x_centered == [D, E, F, ..., k] => [D, E, F, ..., k, 1]
# or == [F, ..., k] => [1, 1, F, ..., k, 1]
x_rank <= sigma_rank - 1,
_broadcast_x_not_higher_rank_than_sigma,
# x_centered == [B, C, D, E, F, ..., k] => [D, E, F, ..., k, B*C]
_broadcast_x_higher_rank_than_sigma)
x_whitened_broadcast = linalg_ops.batch_matrix_triangular_solve(
self._sigma_chol, x_centered_broadcast)
# Reshape x_whitened_broadcast back to x_whitened
x_whitened = control_flow_ops.cond(
x_rank <= sigma_rank - 1,
lambda: array_ops.reshape(x_whitened_broadcast, x_shape),
_unbroadcast_x_higher_rank_than_sigma)
x_whitened = array_ops.expand_dims(x_whitened, -1)
# Reshape x_whitened to contain row vectors
# Returns a batchwise scalar
x_whitened_norm = math_ops.batch_matmul(
x_whitened, x_whitened, adj_x=True)
x_whitened_norm = control_flow_ops.cond(
x_rank <= sigma_rank - 1,
lambda: array_ops.squeeze(x_whitened_norm, [-2, -1]),
lambda: array_ops.squeeze(x_whitened_norm, [-1]))
log_two_pi = constant_op.constant(math.log(2 * math.pi), dtype=self.dtype)
k = math_ops.cast(self._k, self.dtype)
log_pdf_value = (
          -math_ops.log(self._sigma_det) - k * log_two_pi - x_whitened_norm) / 2
final_shaped_value = control_flow_ops.cond(
x_rank <= sigma_rank - 1,
lambda: log_pdf_value,
lambda: array_ops.squeeze(log_pdf_value, [-1]))
output_static_shape = x_centered.get_shape()[:-1]
final_shaped_value.set_shape(output_static_shape)
return final_shaped_value
def pdf(self, x, name=None):
"""The PDF of observations `x` under these Multivariate Normals.
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
pdf: tensor of dtype `dtype`, the pdf values of `x`.
"""
with ops.op_scope(
[self._mu, self._sigma_chol, x], name, "MultivariateNormalPdf"):
return math_ops.exp(self.log_pdf(x))
def entropy(self, name=None):
"""The entropies of these Multivariate Normals.
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropies.
"""
with ops.op_scope(
[self._mu, self._sigma_chol], name, "MultivariateNormalEntropy"):
one_plus_log_two_pi = constant_op.constant(
1 + math.log(2 * math.pi), dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast sigma.
k = math_ops.cast(self._k, dtype=self.dtype)
entropy_value = (
k * one_plus_log_two_pi + math_ops.log(self._sigma_det)) / 2
entropy_value.set_shape(self._sigma_det.get_shape())
return entropy_value
def sample(self, n, seed=None, name=None):
"""Sample `n` observations from the Multivariate Normal Distributions.
Args:
n: `Scalar`, type int32, the number of observations to sample.
seed: Python integer, the random seed.
name: The name to give this op.
Returns:
samples: `[n, ...]`, a `Tensor` of `n` samples for each
of the distributions determined by broadcasting the hyperparameters.
"""
with ops.op_scope(
[self._mu, self._sigma_chol, n], name, "MultivariateNormalSample"):
# TODO(ebrevdo): Is there a better way to get broadcast_shape?
broadcast_shape = self.mu.get_shape()
n = ops.convert_to_tensor(n)
sigma_shape_left = array_ops.slice(
array_ops.shape(self._sigma_chol),
[0], array_ops.pack([array_ops.rank(self._sigma_chol) - 2]))
k_n = array_ops.pack([self._k, n])
shape = array_ops.concat(0, [sigma_shape_left, k_n])
white_samples = random_ops.random_normal(
shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)
correlated_samples = math_ops.batch_matmul(
self._sigma_chol, white_samples)
# Move the last dimension to the front
perm = array_ops.concat(
0,
(array_ops.pack([array_ops.rank(correlated_samples) - 1]),
math_ops.range(0, array_ops.rank(correlated_samples) - 1)))
# TODO(ebrevdo): Once we get a proper tensor contraction op,
# perform the inner product using that instead of batch_matmul
# and this slow transpose can go away!
correlated_samples = array_ops.transpose(correlated_samples, perm)
samples = correlated_samples + self.mu
# Provide some hints to shape inference
n_val = tensor_util.constant_value(n)
final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
samples.set_shape(final_shape)
return samples
@property
def is_reparameterized(self):
return True
|
ivano666/tensorflow
|
tensorflow/contrib/distributions/python/ops/mvn.py
|
Python
|
apache-2.0
| 17,863
|
[
"Gaussian"
] |
7820f53a56ae0b88bc502aabe5116e1589afefe30ffbfc4aa1ebf04023966d01
|
"""Functions to plot epochs data."""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Jaakko Leppakangas <jaeilepp@student.jyu.fi>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
#
# License: Simplified BSD
from collections import Counter
from functools import partial
import copy
import numpy as np
from ..utils import verbose, get_config, set_config, logger, warn
from ..io.pick import pick_types, channel_type, _get_channel_types
from ..time_frequency import psd_multitaper
from .utils import (tight_layout, figure_nobar, _toggle_proj, _toggle_options,
_layout_figure, _setup_vmin_vmax, _channels_changed,
_plot_raw_onscroll, _onclick_help, plt_show, _check_cov,
_compute_scalings, DraggableColorbar, _setup_cmap,
_grad_pair_pick_and_name, _handle_decim,
_setup_plot_projector, _set_ax_label_style)
from .misc import _handle_event_colors
from ..defaults import _handle_default
from ..externals.six import string_types
def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, show=True,
units=None, scalings=None, cmap=None, fig=None,
axes=None, overlay_times=None, combine=None,
group_by=None, evoked=True, ts_args=dict(), title=None):
"""Plot Event Related Potential / Fields image.
Parameters
----------
epochs : instance of Epochs
The epochs.
picks : int | array-like of int | None
The indices of the channels to consider. If None and ``combine`` is
also None, the first five good channels are plotted.
sigma : float
        The standard deviation of the Gaussian smoothing to apply along
        the epoch axis of the image. If 0., no smoothing is applied.
Defaults to 0.
vmin : None | float | callable
The min value in the image (and the ER[P/F]). The unit is uV for
EEG channels, fT for magnetometers and fT/cm for gradiometers.
If vmin is None and multiple plots are returned, the limit is
equalized within channel types.
Hint: to specify the lower limit of the data, use
``vmin=lambda data: data.min()``.
vmax : None | float | callable
The max value in the image (and the ER[P/F]). The unit is uV for
EEG channels, fT for magnetometers and fT/cm for gradiometers.
        If vmax is None and multiple plots are returned, the limit is
equalized within channel types.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
        of the image. If it's an array of int, its length should equal
        the number of good epochs. If it's a callable, the arguments
        passed are the times vector and the data as a 2d array
        (data.shape[1] == len(times)).
show : bool
Show figure if True.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)`.
cmap : None | matplotlib colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use and
the second value is a boolean defining interactivity. In interactive
mode the colors are adjustable by clicking and dragging the colorbar
with left and right mouse button. Left mouse button moves the scale up
and down and right mouse button adjusts the range. Hitting space bar
resets the scale. Up and down arrows can be used to change the
colormap. If 'interactive', translates to ('RdBu_r', True).
If None, "RdBu_r" is used, unless the data is all positive, in which
case "Reds" is used.
fig : matplotlib figure | None
Figure instance to draw the image to. Figure must contain two axes for
drawing the single trials and evoked responses. If None a new figure is
created. Defaults to None.
axes : list of matplotlib axes | dict of lists of matplotlib Axes | None
List of axes instances to draw the image, erp and colorbar to.
Must be of length three if colorbar is True (with the last list element
being the colorbar axes) or two if colorbar is False. If both fig and
axes are passed, an error is raised.
If ``group_by`` is a dict, this cannot be a list, but it can be a dict
of lists of axes, with the keys matching those of ``group_by``. In that
case, the provided axes will be used for the corresponding groups.
Defaults to `None`.
overlay_times : array-like, shape (n_epochs,) | None
If not None the parameter is interpreted as time instants in seconds
and is added to the image. It is typically useful to display reaction
times. Note that it is defined with respect to the order
of epochs such that overlay_times[0] corresponds to epochs[0].
combine : None | str | callable
If None, return one figure per pick. If not None, aggregate over
channels via the indicated method. If str, must be one of "mean",
"median", "std" or "gfp", in which case the mean, the median, the
standard deviation or the GFP over channels are plotted.
array (n_epochs, n_times).
If callable, it must accept one positional input, the data
in the format `(n_epochs, n_channels, n_times)`. It must return an
array `(n_epochs, n_times)`. For example::
combine = lambda data: np.median(data, axis=1)
Defaults to `None` if picks are provided, otherwise 'gfp'.
group_by : None | str | dict
If not None, combining happens over channel groups defined by this
parameter.
If str, must be "type", in which case one figure per channel type is
returned (combining within channel types).
If a dict, the values must be picks and one figure will be returned
for each entry, aggregating over the corresponding pick groups; keys
will become plot titles. This is useful for e.g. ROIs. Each entry must
contain only one channel type. For example::
group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])
If not None, combine must not be None. Defaults to `None` if picks are
provided, otherwise 'type'.
    evoked : bool
        Whether to draw the ER[P/F] below the image.
ts_args : dict
        Arguments passed to a call to `mne.viz.plot_compare_evokeds` to style
the evoked plot below the image. Defaults to an empty dictionary,
meaning `plot_compare_evokeds` will be called with default parameters
(yaxis truncation will be turned off, and inversion of the y axis
via `invert_y=True` will raise an error).
title : None | str
        If str, will be plotted as figure title. Otherwise, the channel
        names (or group names) will be used.
Returns
-------
    figs : list of matplotlib figures
One figure per channel displayed.
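
    Examples
    --------
    A minimal usage sketch (hypothetical; ``epochs`` is assumed to be an
    existing, preloaded :class:`mne.Epochs` instance)::

        from mne.viz import plot_epochs_image
        # one figure per channel type, channels combined by their mean
        figs = plot_epochs_image(epochs, sigma=1., combine='mean',
                                 group_by='type', colorbar=True)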
"""
units = _handle_default('units', units)
scalings = _handle_default('scalings', scalings)
# setting defaults
if group_by is not None and combine is None:
combine = 'gfp'
if all(param is None for param in (group_by, picks, combine)):
group_by = "type"
combine = "gfp"
if combine is not None:
ts_args["show_sensors"] = False
if picks is None:
picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
if group_by is None:
logger.info("No picks and no groupby, showing the first five "
"channels ...")
picks = picks[:5] # take 5 picks to prevent spawning many figs
else:
picks = np.atleast_1d(picks)
if "invert_y" in ts_args:
raise NotImplementedError("'invert_y' found in 'ts_args'. "
"This is currently not implemented.")
manual_ylims = "ylim" in ts_args
vlines = ts_args.get(
"vlines", [0] if (epochs.times[0] < 0 < epochs.times[-1]) else [])
# input checks
if (combine is None and (fig is not None or axes is not None) and
len(picks) > 1):
raise ValueError('Only single pick can be drawn to a figure/axis; '
'provide only one pick, or use `combine`.')
if set(units.keys()) != set(scalings.keys()):
raise ValueError('Scalings and units must have the same keys.')
ch_types = _get_channel_types(epochs.info, picks=picks, unique=False)
if len(set(ch_types)) > 1 and group_by is None and combine is not None:
warn("Combining over multiple channel types. Please use `group_by`.")
for ch_type in set(ch_types):
if ch_type not in scalings:
# We know it's not in either scalings or units since keys match
raise KeyError('%s type not in scalings and units' % ch_type)
if isinstance(axes, dict):
show = False
if not isinstance(group_by, dict):
raise ValueError("If axes is a dict, group_by must be a dict, "
"got " + str(type(group_by)))
else:
if axes is not None and isinstance(group_by, dict):
raise ValueError("If ``group_by`` is a dict, axes must be a dict "
"or None, got " + str(type(group_by)))
if isinstance(group_by, dict) and combine is None:
raise ValueError("If ``group_by`` is a dict, ``combine`` must not be "
"None.")
# call helpers to prepare the plot
# First, we collect groupings of picks and types in two lists
# (all_picks, all_ch_types, names) -> group_by.
# Then, we construct a list of the corresponding data, names and evokeds
# (groups) -> combine.
# Then, we loop over this list and plot using _plot_epochs_image.
# group_by
all_picks, all_ch_types, names = _get_picks_and_types(
picks, ch_types, group_by, combine)
# all_picks is a list of lists of ints (picks); those lists will
# be length 1 if combine is None, else of length > 1.
# combine/construct list for plotting
groups = _pick_and_combine(epochs, combine, all_picks, all_ch_types, names)
# each entry of groups is: (data, ch_type, evoked, name)
# prepare the image - required for uniform vlims
vmins, vmaxs = dict(), dict()
for group in groups:
epochs, ch_type = group[:2]
group.extend(_prepare_epochs_image_im_data(
epochs, ch_type, overlay_times, order, sigma, vmin, vmax,
scalings[ch_type], ts_args))
if vmin is None or vmax is None: # equalize across groups
this_vmin, this_vmax, this_ylim = group[-3:]
if vmin is None and (this_vmin < vmins.get(ch_type, 1)):
vmins[ch_type] = this_vmin
if vmax is None and (this_vmax > vmaxs.get(ch_type, -1)):
vmaxs[ch_type] = this_vmax
# plot
figs, axes_list = list(), list()
ylims = dict((ch_type, (1., -1.)) for ch_type in all_ch_types)
for (epochs_, ch_type, ax_name, name, data, overlay_times, vmin, vmax,
ts_args) in groups:
vmin, vmax = vmins.get(ch_type, vmin), vmaxs.get(ch_type, vmax)
these_axes = axes[ax_name] if isinstance(axes, dict) else axes
axes_dict = _prepare_epochs_image_axes(these_axes, fig, colorbar,
evoked)
axes_list.append(axes_dict)
title_ = ((ax_name if isinstance(axes, dict) else name)
if title is None else title)
this_fig = _plot_epochs_image(
epochs_, data, vmin=vmin, vmax=vmax, colorbar=colorbar, show=False,
unit=units[ch_type], ch_type=ch_type, cmap=cmap,
axes_dict=axes_dict, title=title_, overlay_times=overlay_times,
evoked=evoked, ts_args=ts_args)
figs.append(this_fig)
# the rest of the code is for aligning ylims for multiple plots
if evoked is True and not manual_ylims:
evoked_ax = axes_dict["evoked"]
this_min, this_max = evoked_ax.get_ylim()
curr_min, curr_max = ylims[ch_type]
ylims[ch_type] = min(curr_min, this_min), max(curr_max, this_max),
if evoked is True: # adjust ylims
for group, axes_dict in zip(groups, axes_list):
ch_type = group[1]
ax = axes_dict["evoked"]
this_ymin, this_ymax = these_ylims = ylims[ch_type]
ax.set_ylim(these_ylims)
yticks = np.array(ax.get_yticks())
max_height = yticks[yticks < this_ymax][-1]
if not manual_ylims:
ax.spines["left"].set_bounds(this_ymin, max_height)
if len(vlines) > 0:
if overlay_times is not None:
overlay = {overlay_times.mean(), np.median(overlay_times)}
                else:
                    overlay = set()
for line in vlines:
ax.vlines(line, this_ymin, max_height, colors='k',
linestyles='-' if line in overlay else "--",
linewidth=2. if line in overlay else 1.)
plt_show(show)
return figs
def _get_picks_and_types(picks, ch_types, group_by, combine):
"""Pack picks and types into a list. Helper for plot_epochs_image."""
if group_by is None:
if combine is not None:
picks = [picks]
return picks, ch_types, ch_types
elif group_by == "type":
all_picks, all_ch_types = list(), list()
for this_type in set(ch_types):
these_picks = picks[np.array(ch_types) == this_type]
all_picks.append(these_picks)
all_ch_types.append(this_type)
names = all_ch_types # only differs for dict group_by
elif isinstance(group_by, dict):
names = list(group_by.keys())
all_picks = [group_by[name] for name in names]
for name, picks_ in group_by.items():
n_picks = len(picks_)
if n_picks < 2:
raise ValueError(" ".join(
(name, "has only ", str(n_picks), "sensors.")))
all_ch_types = list()
for picks_, name in zip(all_picks, names):
this_ch_type = list(set((ch_types[pick] for pick in picks_)))
n_types = len(this_ch_type)
if n_types > 1: # we can only scale properly with 1 type
raise ValueError(
"Roi {} contains {} sensor types!".format(
name, n_types))
all_ch_types.append(this_ch_type[0])
else:
raise ValueError("If ``group_by`` is not None, it must be a dict "
"or 'type', got " + str(type(group_by)))
return all_picks, all_ch_types, names # all_picks is a list of lists
def _pick_and_combine(epochs, combine, all_picks, all_ch_types, names):
"""Pick and combine epochs image. Helper for plot_epochs_image."""
to_plot_list = list()
tmin = epochs.times[0]
if combine is None:
if epochs.preload is False:
epochs = epochs.copy().load_data() # FIXME: avoid copy
for pick, ch_type in zip(all_picks, all_ch_types):
name = epochs.ch_names[pick]
these_epochs = epochs.copy().pick_channels([name])
to_plot_list.append([these_epochs, ch_type, name, name])
return to_plot_list
# if combine is not None ...
from .. import EpochsArray, pick_info
data = epochs.get_data()
type2name = {"eeg": "EEG", "grad": "Gradiometers",
"mag": "Magnetometers"}
combine_title = (" (" + combine + ")"
if isinstance(combine, string_types) else "")
if combine == "gfp":
def combine(data):
return np.sqrt((data * data).mean(axis=1))
elif combine == "mean":
def combine(data):
return np.mean(data, axis=1)
elif combine == "std":
def combine(data):
return np.std(data, axis=1)
elif combine == "median":
def combine(data):
return np.median(data, axis=1)
elif not callable(combine):
        raise ValueError(
            "``combine`` must be None, a callable, or one of 'mean', "
            "'median', 'std' or 'gfp'. Got " + str(type(combine)))
for ch_type, picks_, name in zip(all_ch_types, all_picks, names):
if len(np.atleast_1d(picks_)) < 2:
raise ValueError("Cannot combine over only one sensor. "
"Consider using different values for "
"``picks`` and/or ``group_by``.")
if ch_type == "grad":
def pair_and_combine(data):
data = data ** 2
data = (data[:, ::2, :] + data[:, 1::2, :]) / 2
return combine(np.sqrt(data))
picks_ = _grad_pair_pick_and_name(epochs.info, picks_)[0]
this_data = pair_and_combine(
data[:, picks_, :])[:, np.newaxis, :]
else:
this_data = combine(
data[:, picks_, :])[:, np.newaxis, :]
info = pick_info(epochs.info, [picks_[0]], copy=True)
info['projs'] = []
these_epochs = EpochsArray(this_data.copy(), info, tmin=tmin)
to_plot_list.append([these_epochs, ch_type, name,
type2name.get(name, name) + combine_title])
return to_plot_list # epochs, ch_type, name, axtitle
def _prepare_epochs_image_im_data(epochs, ch_type, overlay_times, order,
sigma, vmin, vmax, scaling, ts_args):
"""Preprocess epochs image (sort, filter). Helper for plot_epochs_image."""
from scipy import ndimage
# data transforms - sorting, scaling, smoothing
data = epochs.get_data()[:, 0, :]
n_epochs = len(data)
if overlay_times is not None and len(overlay_times) != n_epochs:
        raise ValueError('size of overlay_times parameter (%s) does not '
                         'match the number of epochs (%s).'
                         % (len(overlay_times), n_epochs))
if overlay_times is not None:
overlay_times = np.array(overlay_times)
times_min = np.min(overlay_times)
times_max = np.max(overlay_times)
if ((times_min < epochs.times[0]) or (times_max > epochs.times[-1])):
warn('Some values in overlay_times fall outside of the epochs '
'time interval (between %s s and %s s)'
% (epochs.times[0], epochs.times[-1]))
if callable(order):
order = order(epochs.times, data)
if order is not None and (len(order) != n_epochs):
raise ValueError(("`order` must be None, callable or an array as long "
"as the data. Got " + str(type(order))))
if order is not None:
order = np.asarray(order)
data = data[order]
if overlay_times is not None:
overlay_times = overlay_times[order]
if sigma > 0.:
data = ndimage.gaussian_filter1d(data, sigma=sigma, axis=0)
# setup lims and cmap
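    # user-supplied numeric limits are expressed in display units (uV, fT,
    # fT/cm), so they are divided by ``scaling`` below for comparison with
    # the raw data; everything is scaled back to display units in the
    # returned list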
    scale_vmin = vmin is None or callable(vmin)
    scale_vmax = vmax is None or callable(vmax)
vmin, vmax = _setup_vmin_vmax(
data, vmin, vmax, norm=(data.min() >= 0) and (vmin is None))
if not scale_vmin:
vmin /= scaling
if not scale_vmax:
vmax /= scaling
ylim = dict()
ts_args_ = dict(colors={"cond": "black"}, ylim=ylim, picks=[0], title='',
truncate_yaxis=False, truncate_xaxis=False, show=False)
ts_args_.update(**ts_args)
ts_args_["vlines"] = []
return [data * scaling, overlay_times, vmin * scaling, vmax * scaling,
ts_args_]
def _make_epochs_image_axis_grid(axes_dict=None, colorbar=False,
                                 evoked=False):
    """Create axes for image plotting. Helper for plot_epochs_image."""
    import matplotlib.pyplot as plt
    if axes_dict is None:  # avoid a shared mutable default argument
        axes_dict = dict()
axes_dict["image"] = axes_dict.get("image", plt.subplot2grid(
(3, 10), (0, 0), colspan=9 if colorbar else 10,
rowspan=2 if evoked else 3))
if evoked:
axes_dict["evoked"] = plt.subplot2grid(
(3, 10), (2, 0), colspan=9 if colorbar else 10, rowspan=1)
if colorbar:
axes_dict["colorbar"] = plt.subplot2grid(
(3, 10), (0, 9), colspan=1, rowspan=2 if evoked else 3)
return axes_dict
def _prepare_epochs_image_axes(axes, fig, colorbar, evoked):
"""Prepare axes for image plotting. Helper for plot_epochs_image."""
import matplotlib.pyplot as plt
# prepare fig and axes
axes_dict = dict()
if axes is None:
if fig is None:
fig = plt.figure()
plt.figure(fig.number)
axes_dict = _make_epochs_image_axis_grid(
axes_dict, colorbar, evoked)
else:
if fig is not None:
            raise ValueError('Both figure and axes were passed, please '
                             'only pass one of these.')
from .utils import _validate_if_list_of_axes
oblig_len = 3 - ((not colorbar) + (not evoked))
_validate_if_list_of_axes(axes, obligatory_len=oblig_len)
axes_dict["image"] = axes[0]
if evoked:
axes_dict["evoked"] = axes[1]
# if axes were passed - we ignore fig param and get figure from axes
fig = axes_dict["image"].get_figure()
if colorbar:
axes_dict["colorbar"] = axes[-1]
return axes_dict
def _plot_epochs_image(epochs, data, ch_type, vmin=None, vmax=None,
colorbar=False, show=False, unit=None, cmap=None,
axes_dict=None, overlay_times=None, title=None,
evoked=False, ts_args=None):
"""Plot epochs image. Helper function for plot_epochs_image."""
if cmap is None:
cmap = "Reds" if data.min() >= 0 else 'RdBu_r'
# Plot
# draw the image
ax = axes_dict["image"]
fig = ax.get_figure()
cmap = _setup_cmap(cmap)
n_epochs = len(data)
extent = [1e3 * epochs.times[0], 1e3 * epochs.times[-1], 0, n_epochs]
im = ax.imshow(data, vmin=vmin, vmax=vmax, cmap=cmap[0], aspect='auto',
origin='lower', interpolation='nearest', extent=extent)
if overlay_times is not None:
ax.plot(1e3 * overlay_times, 0.5 + np.arange(n_epochs), 'k',
linewidth=2)
ax.set_title(title)
ax.set_ylabel('Epochs')
ax.axis('auto')
ax.axis('tight')
if overlay_times is not None:
ax.set_xlim(1e3 * epochs.times[0], 1e3 * epochs.times[-1])
ax.axvline(0, color='k', linewidth=1, linestyle='--')
# draw the evoked
if evoked:
from mne.viz import plot_compare_evokeds
plot_compare_evokeds(
{"cond": list(epochs.iter_evoked())}, axes=axes_dict["evoked"],
**ts_args)
axes_dict["evoked"].set_xlim(epochs.times[[0, -1]])
ax.set_xticks(())
# draw the colorbar
if colorbar:
import matplotlib.pyplot as plt
cbar = plt.colorbar(im, cax=axes_dict['colorbar'])
cbar.ax.set_ylabel(unit + "\n\n", rotation=270)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
tight_layout(fig=fig)
fig._axes_dict = axes_dict # storing this here for easy access later
# finish
plt_show(show)
return fig
def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',),
show=True):
"""Show the channel stats based on a drop_log from Epochs.
Parameters
----------
drop_log : list of lists
Epoch drop log from Epochs.drop_log.
threshold : float
The percentage threshold to use to decide whether or not to
plot. Default is zero (always plot).
n_max_plot : int
Maximum number of channels to show stats for.
subject : str
The subject name to use in the title of the plot.
color : tuple | str
Color to use for the bars.
width : float
Width of the bars.
ignore : list
The drop reasons to ignore.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
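
    Examples
    --------
    A minimal usage sketch (hypothetical; assumes ``epochs`` is an existing
    :class:`mne.Epochs` instance on which ``drop_bad`` has been called)::

        from mne.viz import plot_drop_log
        fig = plot_drop_log(epochs.drop_log, subject='sample')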
"""
import matplotlib.pyplot as plt
from ..epochs import _drop_log_stats
perc = _drop_log_stats(drop_log, ignore)
scores = Counter([ch for d in drop_log for ch in d if ch not in ignore])
ch_names = np.array(list(scores.keys()))
fig = plt.figure()
if perc < threshold or len(ch_names) == 0:
plt.text(0, 0, 'No drops')
return fig
n_used = 0
for d in drop_log: # "d" is the list of drop reasons for each epoch
if len(d) == 0 or any(ch not in ignore for ch in d):
n_used += 1 # number of epochs not ignored
counts = 100 * np.array(list(scores.values()), dtype=float) / n_used
n_plot = min(n_max_plot, len(ch_names))
order = np.flipud(np.argsort(counts))
plt.title('%s: %0.1f%%' % (subject, perc))
x = np.arange(n_plot)
plt.bar(x, counts[order[:n_plot]], color=color, width=width)
plt.xticks(x + width / 2.0, ch_names[order[:n_plot]], rotation=45,
horizontalalignment='right')
plt.tick_params(axis='x', which='major', labelsize=10)
plt.ylabel('% of epochs rejected')
plt.xlim((-width / 2.0, (n_plot - 1) + width * 3 / 2))
plt.grid(True, axis='y')
tight_layout(pad=1, fig=fig)
plt_show(show)
return fig
def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
title_str, axes_handler):
"""Handle drawing epochs axes."""
this = axes_handler[0]
for ii, data_, ax in zip(epoch_idx, data, axes):
for l, d in zip(ax.lines, data_[good_ch_idx]):
l.set_data(times, d)
if bad_ch_idx is not None:
bad_lines = [ax.lines[k] for k in bad_ch_idx]
for l, d in zip(bad_lines, data_[bad_ch_idx]):
l.set_data(times, d)
if title_str is not None:
ax.set_title(title_str % ii, fontsize=12)
ax.set_ylim(data.min(), data.max())
ax.set_yticks(list())
ax.set_xticks(list())
if vars(ax)[this]['reject'] is True:
# memorizing reject
for l in ax.lines:
l.set_color((0.8, 0.8, 0.8))
ax.get_figure().canvas.draw()
else:
# forgetting previous reject
for k in axes_handler:
if k == this:
continue
if vars(ax).get(k, {}).get('reject', None) is True:
for l in ax.lines[:len(good_ch_idx)]:
l.set_color('k')
if bad_ch_idx is not None:
for l in ax.lines[-len(bad_ch_idx):]:
l.set_color('r')
ax.get_figure().canvas.draw()
break
def _epochs_navigation_onclick(event, params):
"""Handle epochs navigation click."""
import matplotlib.pyplot as plt
p = params
here = None
if event.inaxes == p['back'].ax:
here = 1
elif event.inaxes == p['next'].ax:
here = -1
elif event.inaxes == p['reject-quit'].ax:
if p['reject_idx']:
p['epochs'].drop(p['reject_idx'])
plt.close(p['fig'])
plt.close(event.inaxes.get_figure())
if here is not None:
p['idx_handler'].rotate(here)
p['axes_handler'].rotate(here)
this_idx = p['idx_handler'][0]
_draw_epochs_axes(this_idx, p['good_ch_idx'], p['bad_ch_idx'],
p['data'][this_idx],
p['times'], p['axes'], p['title_str'],
p['axes_handler'])
# XXX don't ask me why
p['axes'][0].get_figure().canvas.draw()
def _epochs_axes_onclick(event, params):
"""Handle epochs axes click."""
reject_color = (0.8, 0.8, 0.8)
ax = event.inaxes
if event.inaxes is None:
return
p = params
here = vars(ax)[p['axes_handler'][0]]
if here.get('reject', None) is False:
idx = here['idx']
if idx not in p['reject_idx']:
p['reject_idx'].append(idx)
for l in ax.lines:
l.set_color(reject_color)
here['reject'] = True
elif here.get('reject', None) is True:
idx = here['idx']
if idx in p['reject_idx']:
p['reject_idx'].pop(p['reject_idx'].index(idx))
good_lines = [ax.lines[k] for k in p['good_ch_idx']]
for l in good_lines:
l.set_color('k')
if p['bad_ch_idx'] is not None:
bad_lines = ax.lines[-len(p['bad_ch_idx']):]
for l in bad_lines:
l.set_color('r')
here['reject'] = False
ax.get_figure().canvas.draw()
def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20, n_channels=20,
title=None, events=None, event_colors=None, show=True,
block=False, decim='auto', noise_cov=None):
"""Visualize epochs.
Bad epochs can be marked with a left click on top of the epoch. Bad
channels can be selected by clicking the channel name on the left side of
the main axes. Calling this function drops all the selected bad epochs as
well as bad epochs marked beforehand with rejection parameters.
Parameters
----------
epochs : instance of Epochs
        The epochs object.
picks : array-like of int | None
Channels to be included. If None only good data channels are used.
        Defaults to None.
scalings : dict | 'auto' | None
Scaling factors for the traces. If any fields in scalings are 'auto',
the scaling factor is set to match the 99.5th percentile of a subset of
the corresponding data. If scalings == 'auto', all scalings fields are
set to 'auto'. If any fields are 'auto' and data is not preloaded,
        a subset of epochs up to 100 MB will be loaded. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4,
whitened=10.)
n_epochs : int
The number of epochs per view. Defaults to 20.
n_channels : int
The number of channels per view. Defaults to 20.
title : str | None
The title of the window. If None, epochs name will be displayed.
Defaults to None.
    events : None | array, shape (n_events, 3)
Events to show with vertical bars. If events are provided, the epoch
numbers are not shown to prevent overlap. You can toggle epoch
numbering through options (press 'o' key). You can use
:func:`mne.viz.plot_events` as a legend for the colors. By default, the
coloring scheme is the same.
.. warning:: If the epochs have been resampled, the events no longer
align with the data.
.. versionadded:: 0.14.0
    event_colors : None | dict
Dictionary of event_id value and its associated color. If None,
colors are automatically drawn from a default list (cycled through if
number of events longer than list of default colors). Uses the same
coloring scheme as :func:`mne.viz.plot_events`.
.. versionadded:: 0.14.0
show : bool
        Show figure if True. Defaults to True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on an epoch.
Defaults to False.
decim : int | 'auto'
Amount to decimate the data during display for speed purposes.
You should only decimate if the data are sufficiently low-passed,
otherwise aliasing can occur. The 'auto' mode (default) uses
the decimation that results in a sampling rate at least three times
larger than ``info['lowpass']`` (e.g., a 40 Hz lowpass will result in
at least a 120 Hz displayed sample rate).
.. versionadded:: 0.15.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channels are scaled by ``scalings['whitened']``,
and their channel names are shown in italic.
Can be a string to load a covariance from disk.
See also :meth:`mne.Evoked.plot_white` for additional inspection
of noise covariance properties when whitening evoked data.
For data processed with SSS, the effective dependence between
magnetometers and gradiometers may introduce differences in scaling,
consider using :meth:`mne.Evoked.plot_white`.
.. versionadded:: 0.16.0
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
Notes
-----
The arrow keys (up/down/left/right) can be used to navigate between
channels and epochs and the scaling can be adjusted with - and + (or =)
keys, but this depends on the backend matplotlib is configured to use
(e.g., mpl.use(``TkAgg``) should work). Full screen mode can be toggled
with f11 key. The amount of epochs and channels per view can be adjusted
with home/end and page down/page up keys. These can also be set through
options dialog by pressing ``o`` key. ``h`` key plots a histogram of
peak-to-peak values along with the used rejection thresholds. Butterfly
plot can be toggled with ``b`` key. Right mouse click adds a vertical line
to the plot. Click 'help' button at bottom left corner of the plotter to
view all the options.
.. versionadded:: 0.10.0
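
    Examples
    --------
    A minimal usage sketch (hypothetical; assumes ``epochs`` is an existing
    :class:`mne.Epochs` instance)::

        from mne.viz import plot_epochs
        # browse 10 epochs x 20 channels at a time; block until the window
        # is closed so that marked epochs are dropped
        fig = plot_epochs(epochs, n_epochs=10, n_channels=20, block=True)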
"""
epochs.drop_bad()
scalings = _compute_scalings(scalings, epochs)
scalings = _handle_default('scalings_plot_raw', scalings)
decim, data_picks = _handle_decim(epochs.info.copy(), decim, None)
projs = epochs.info['projs']
noise_cov = _check_cov(noise_cov, epochs.info)
params = dict(epochs=epochs, info=epochs.info.copy(), t_start=0.,
bad_color=(0.8, 0.8, 0.8), histogram=None, decim=decim,
data_picks=data_picks, noise_cov=noise_cov,
use_noise_cov=noise_cov is not None)
params['label_click_fun'] = partial(_pick_bad_channels, params=params)
_prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
title, picks, events=events,
event_colors=event_colors)
_prepare_projectors(params)
_layout_figure(params)
callback_close = partial(_close_event, params=params)
params['fig'].canvas.mpl_connect('close_event', callback_close)
try:
plt_show(show, block=block)
except TypeError: # not all versions have this
plt_show(show)
return params['fig']
@verbose
def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None,
proj=False, bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, ax=None, color='black',
area_mode='std', area_alpha=0.33, dB=True, n_jobs=1,
show=True, verbose=None):
"""Plot the power spectral density across epochs.
Parameters
----------
epochs : instance of Epochs
        The epochs object.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
picks : array-like of int | None
List of channels to use.
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels) will be
plotted. Bad channels will be excluded from these calculations.
If None, no area will be plotted.
area_alpha : float
Alpha for the area.
dB : bool
If True, transform data to decibels.
n_jobs : int
Number of jobs to run in parallel.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
    fig : instance of matplotlib figure
        Figure with the frequency spectra of the selected channels, one
        subplot per channel type.
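
    Examples
    --------
    A minimal usage sketch (hypothetical; assumes ``epochs`` is an existing
    :class:`mne.Epochs` instance)::

        from mne.viz import plot_epochs_psd
        fig = plot_epochs_psd(epochs, fmin=2., fmax=40., dB=True)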
"""
from .raw import _set_psd_plot_params, _convert_psds
fig, picks_list, titles_list, units_list, scalings_list, ax_list, \
make_label = _set_psd_plot_params(
epochs.info, proj, picks, ax, area_mode)
for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
ax_list)):
psds, freqs = psd_multitaper(epochs, picks=picks, fmin=fmin,
fmax=fmax, tmin=tmin, tmax=tmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias,
normalization=normalization, proj=proj,
n_jobs=n_jobs)
ylabel = _convert_psds(psds, dB, 'auto', scalings_list[ii],
units_list[ii],
[epochs.ch_names[pi] for pi in picks])
# mean across epochs and channels
psd_mean = np.mean(psds, axis=0).mean(axis=0)
if area_mode == 'std':
# std across channels
psd_std = np.std(np.mean(psds, axis=0), axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == 'range':
hyp_limits = (np.min(np.mean(psds, axis=0), axis=0),
np.max(np.mean(psds, axis=0), axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color)
if hyp_limits is not None:
ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
color=color, alpha=area_alpha)
if make_label:
if ii == len(picks_list) - 1:
ax.set_xlabel('Frequency (Hz)')
ax.set(ylabel=ylabel, title=title, xlim=(freqs[0], freqs[-1]))
if make_label:
tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
plt_show(show)
return fig
def _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
title, picks, events=None, event_colors=None,
order=None):
"""Set up the mne_browse_epochs window."""
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
epochs = params['epochs']
if picks is None:
picks = _handle_picks(epochs)
if len(picks) < 1:
raise RuntimeError('No appropriate channels found. Please'
' check your picks')
picks = sorted(picks)
# Reorganize channels
inds = list()
types = list()
for t in ['grad', 'mag']:
idxs = pick_types(params['info'], meg=t, ref_meg=False, exclude=[])
if len(idxs) < 1:
continue
mask = np.in1d(idxs, picks, assume_unique=True)
inds.append(idxs[mask])
types += [t] * len(inds[-1])
for t in ['hbo', 'hbr']:
idxs = pick_types(params['info'], meg=False, ref_meg=False, fnirs=t,
exclude=[])
if len(idxs) < 1:
continue
mask = np.in1d(idxs, picks, assume_unique=True)
inds.append(idxs[mask])
types += [t] * len(inds[-1])
pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
if order is None:
order = ['eeg', 'seeg', 'ecog', 'eog', 'ecg', 'emg', 'ref_meg', 'stim',
'resp', 'misc', 'chpi', 'syst', 'ias', 'exci']
for ch_type in order:
pick_kwargs[ch_type] = True
idxs = pick_types(params['info'], **pick_kwargs)
if len(idxs) < 1:
continue
mask = np.in1d(idxs, picks, assume_unique=True)
inds.append(idxs[mask])
types += [ch_type] * len(inds[-1])
pick_kwargs[ch_type] = False
inds = np.concatenate(inds).astype(int)
if not len(inds) == len(picks):
raise RuntimeError('Some channels not classified. Please'
' check your picks')
ch_names = [params['info']['ch_names'][x] for x in inds]
# set up plotting
size = get_config('MNE_BROWSE_RAW_SIZE')
n_epochs = min(n_epochs, len(epochs.events))
duration = len(epochs.times) * n_epochs
n_channels = min(n_channels, len(picks))
if size is not None:
size = size.split(',')
size = tuple(float(s) for s in size)
if title is None:
title = epochs._name
if title is None or len(title) == 0:
title = ''
fig = figure_nobar(facecolor='w', figsize=size, dpi=80)
fig.canvas.set_window_title('mne_browse_epochs')
ax = plt.subplot2grid((10, 15), (0, 1), colspan=13, rowspan=9)
ax.annotate(title, xy=(0.5, 1), xytext=(0, ax.get_ylim()[1] + 15),
ha='center', va='bottom', size=12, xycoords='axes fraction',
textcoords='offset points')
color = _handle_default('color', None)
ax.axis([0, duration, 0, 200])
ax2 = ax.twiny()
ax2.set_zorder(-1)
ax2.axis([0, duration, 0, 200])
ax_hscroll = plt.subplot2grid((10, 15), (9, 1), colspan=13)
ax_hscroll.get_yaxis().set_visible(False)
ax_hscroll.set_xlabel('Epochs')
ax_vscroll = plt.subplot2grid((10, 15), (0, 14), rowspan=9)
ax_vscroll.set_axis_off()
ax_vscroll.add_patch(mpl.patches.Rectangle((0, 0), 1, len(picks),
facecolor='w', zorder=3))
ax_help_button = plt.subplot2grid((10, 15), (9, 0), colspan=1)
help_button = mpl.widgets.Button(ax_help_button, 'Help')
help_button.on_clicked(partial(_onclick_help, params=params))
# populate vertical and horizontal scrollbars
for ci in range(len(picks)):
if ch_names[ci] in params['info']['bads']:
this_color = params['bad_color']
else:
this_color = color[types[ci]]
ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
facecolor=this_color,
edgecolor=this_color,
zorder=4))
vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
edgecolor='w', facecolor='w', zorder=5)
ax_vscroll.add_patch(vsel_patch)
ax_vscroll.set_ylim(len(types), 0)
ax_vscroll.set_title('Ch.')
# populate colors list
type_colors = [colorConverter.to_rgba(color[c]) for c in types]
colors = list()
for color_idx in range(len(type_colors)):
colors.append([type_colors[color_idx]] * len(epochs.events))
lines = list()
n_times = len(epochs.times)
for ch_idx in range(n_channels):
if len(colors) - 1 < ch_idx:
break
lc = LineCollection(list(), antialiased=True, linewidths=0.5,
zorder=3, picker=3.)
ax.add_collection(lc)
lines.append(lc)
times = epochs.times
data = np.zeros((params['info']['nchan'], len(times) * n_epochs))
ylim = (25., 0.) # Hardcoded 25 because butterfly has max 5 rows (5*5=25).
# make shells for plotting traces
offset = ylim[0] / n_channels
offsets = np.arange(n_channels) * offset + (offset / 2.)
times = np.arange(len(times) * len(epochs.events))
epoch_times = np.arange(0, len(times), n_times)
ax.set_yticks(offsets)
ax.set_ylim(ylim)
ticks = epoch_times + 0.5 * n_times
ax.set_xticks(ticks)
ax2.set_xticks(ticks[:n_epochs])
labels = list(range(1, len(ticks) + 1)) # epoch numbers
ax.set_xticklabels(labels)
xlim = epoch_times[-1] + len(epochs.times)
ax_hscroll.set_xlim(0, xlim)
vertline_t = ax_hscroll.text(0, 1, '', color='y', va='bottom', ha='right')
# fit horizontal scroll bar ticks
hscroll_ticks = np.arange(0, xlim, xlim / 7.0)
hscroll_ticks = np.append(hscroll_ticks, epoch_times[-1])
hticks = list()
for tick in hscroll_ticks:
hticks.append(epoch_times.flat[np.abs(epoch_times - tick).argmin()])
hlabels = [x / n_times + 1 for x in hticks]
ax_hscroll.set_xticks(hticks)
ax_hscroll.set_xticklabels(hlabels)
for epoch_idx in range(len(epoch_times)):
ax_hscroll.add_patch(mpl.patches.Rectangle((epoch_idx * n_times, 0),
n_times, 1, facecolor='w',
edgecolor='w', alpha=0.6))
hsel_patch = mpl.patches.Rectangle((0, 0), duration, 1,
edgecolor='k',
facecolor=(0.75, 0.75, 0.75),
alpha=0.25, linewidth=1, clip_on=False)
ax_hscroll.add_patch(hsel_patch)
text = ax.text(0, 0, 'blank', zorder=3, verticalalignment='baseline',
ha='left', fontweight='bold')
text.set_visible(False)
epoch_nr = True
if events is not None:
event_set = set(events[:, 2])
event_colors = _handle_event_colors(event_set, event_colors, event_set)
epoch_nr = False # epoch number off by default to avoid overlap
for label in ax.xaxis.get_ticklabels():
label.set_visible(False)
params.update({'fig': fig,
'ax': ax,
'ax2': ax2,
'ax_hscroll': ax_hscroll,
'ax_vscroll': ax_vscroll,
'vsel_patch': vsel_patch,
'hsel_patch': hsel_patch,
'lines': lines,
'projs': projs,
'ch_names': ch_names,
'n_channels': n_channels,
'n_epochs': n_epochs,
'scalings': scalings,
'duration': duration,
'ch_start': 0,
'colors': colors,
'def_colors': type_colors, # don't change at runtime
'picks': picks,
'bads': np.array(list(), dtype=int),
'data': data,
'times': times,
'epoch_times': epoch_times,
'offsets': offsets,
'labels': labels,
'scale_factor': 1.0,
'butterfly_scale': 1.0,
'fig_proj': None,
'types': np.array(types),
'inds': inds,
'vert_lines': list(),
'vertline_t': vertline_t,
'butterfly': False,
'text': text,
'ax_help_button': ax_help_button, # needed for positioning
'help_button': help_button, # reference needed for clicks
'fig_options': None,
'settings': [True, True, epoch_nr, True],
'image_plot': None,
'events': events,
'event_colors': event_colors,
'ev_lines': list(),
'ev_texts': list()})
params['plot_fun'] = partial(_plot_traces, params=params)
# callbacks
callback_scroll = partial(_plot_onscroll, params=params)
fig.canvas.mpl_connect('scroll_event', callback_scroll)
callback_click = partial(_mouse_click, params=params)
fig.canvas.mpl_connect('button_press_event', callback_click)
callback_key = partial(_plot_onkey, params=params)
fig.canvas.mpl_connect('key_press_event', callback_key)
callback_resize = partial(_resize_event, params=params)
fig.canvas.mpl_connect('resize_event', callback_resize)
fig.canvas.mpl_connect('pick_event', partial(_onpick, params=params))
params['callback_key'] = callback_key
# Draw event lines for the first time.
_plot_vert_lines(params)
# default key to close window
params['close_key'] = 'escape'
def _prepare_projectors(params):
"""Set up the projectors for epochs browser."""
import matplotlib.pyplot as plt
import matplotlib as mpl
epochs = params['epochs']
projs = params['projs']
if len(projs) > 0 and not epochs.proj:
ax_button = plt.subplot2grid((10, 15), (9, 14))
opt_button = mpl.widgets.Button(ax_button, 'Proj')
callback_option = partial(_toggle_options, params=params)
opt_button.on_clicked(callback_option)
params['opt_button'] = opt_button
params['ax_button'] = ax_button
# As here code is shared with plot_evoked, some extra steps:
# first the actual plot update function
params['plot_update_proj_callback'] = _plot_update_epochs_proj
# then the toggle handler
callback_proj = partial(_toggle_proj, params=params)
# store these for use by callbacks in the options figure
params['callback_proj'] = callback_proj
callback_proj('none')
def _plot_traces(params):
"""Plot concatenated epochs."""
params['text'].set_visible(False)
ax = params['ax']
butterfly = params['butterfly']
if butterfly:
ch_start = 0
n_channels = len(params['picks'])
data = params['data'] * params['butterfly_scale']
else:
ch_start = params['ch_start']
n_channels = params['n_channels']
data = params['data'] * params['scale_factor']
offsets = params['offsets']
lines = params['lines']
epochs = params['epochs']
n_times = len(epochs.times)
tick_list = list()
start_idx = int(params['t_start'] / n_times)
end = params['t_start'] + params['duration']
end_idx = int(end / n_times)
xlabels = params['labels'][start_idx:]
event_ids = params['epochs'].events[:, 2]
params['ax2'].set_xticklabels(event_ids[start_idx:])
ax.set_xticklabels(xlabels)
ylabels = ax.yaxis.get_ticklabels()
# do the plotting
for line_idx in range(n_channels):
ch_idx = line_idx + ch_start
if line_idx >= len(lines):
break
elif ch_idx < len(params['ch_names']):
if butterfly:
ch_type = params['types'][ch_idx]
if ch_type == 'grad':
offset = offsets[0]
elif ch_type == 'mag':
offset = offsets[1]
elif ch_type == 'eeg':
offset = offsets[2]
elif ch_type == 'eog':
offset = offsets[3]
elif ch_type == 'ecg':
offset = offsets[4]
else:
lines[line_idx].set_segments(list())
else:
tick_list += [params['ch_names'][ch_idx]]
offset = offsets[line_idx]
if params['inds'][ch_idx] in params['data_picks']:
this_decim = params['decim']
else:
this_decim = 1
this_data = data[ch_idx]
# subtraction here gets correct orientation for flipped ylim
ydata = offset - this_data
xdata = params['times'][:params['duration']]
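            # the concatenated trace is split back into one segment per
            # visible epoch and decimated for display speed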
num_epochs = np.min([params['n_epochs'], len(epochs.events)])
segments = np.split(np.array((xdata, ydata)).T, num_epochs)
segments = [segment[::this_decim] for segment in segments]
ch_name = params['ch_names'][ch_idx]
if ch_name in params['info']['bads']:
if not butterfly:
this_color = params['bad_color']
ylabels[line_idx].set_color(this_color)
this_color = np.tile((params['bad_color']), (num_epochs, 1))
for bad_idx in params['bads']:
if bad_idx < start_idx or bad_idx > end_idx:
continue
this_color[bad_idx - start_idx] = (1., 0., 0.)
lines[line_idx].set_zorder(2)
else:
this_color = params['colors'][ch_idx][start_idx:end_idx]
lines[line_idx].set_zorder(3)
if not butterfly:
ylabels[line_idx].set_color('black')
lines[line_idx].set_segments(segments)
lines[line_idx].set_color(this_color)
else:
lines[line_idx].set_segments(list())
# finalize plot
ax.set_xlim(params['times'][0], params['times'][0] + params['duration'],
False)
params['ax2'].set_xlim(params['times'][0],
params['times'][0] + params['duration'], False)
if butterfly:
factor = -1. / params['butterfly_scale']
labels = [''] * 20
ticks = ax.get_yticks()
idx_offset = 1
# XXX eventually these scale factors should use "scalings"
# of some sort
if 'grad' in params['types']:
labels[idx_offset + 1] = 0.
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = ((ticks[idx] - offsets[0]) *
params['scalings']['grad'] *
1e13 * factor)
idx_offset += 4
if 'mag' in params['types']:
labels[idx_offset + 1] = 0.
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = ((ticks[idx] - offsets[1]) *
params['scalings']['mag'] *
1e15 * factor)
idx_offset += 4
if 'eeg' in params['types']:
labels[idx_offset + 1] = 0.
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = ((ticks[idx] - offsets[2]) *
params['scalings']['eeg'] *
1e6 * factor)
idx_offset += 4
if 'eog' in params['types']:
labels[idx_offset + 1] = 0.
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = ((ticks[idx] - offsets[3]) *
params['scalings']['eog'] *
1e6 * factor)
idx_offset += 4
if 'ecg' in params['types']:
labels[idx_offset + 1] = 0.
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = ((ticks[idx] - offsets[4]) *
params['scalings']['ecg'] *
1e6 * factor)
# Heuristic to turn floats to ints where possible (e.g. -500.0 to -500)
for li, label in enumerate(labels):
if isinstance(label, float) and float(str(label)) == round(label):
labels[li] = int(round(label))
ax.set_yticklabels(labels, fontsize=12, color='black')
else:
ax.set_yticklabels(tick_list, fontsize=12)
_set_ax_label_style(ax, params)
if params['events'] is not None: # vertical lines for events.
_draw_event_lines(params)
params['vsel_patch'].set_y(ch_start)
params['fig'].canvas.draw()
# XXX This is a hack to make sure this figure gets drawn last
# so that when matplotlib goes to calculate bounds we don't get a
# CGContextRef error on the MacOSX backend :(
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _plot_update_epochs_proj(params, bools=None):
"""Deal with proj changed."""
if bools is not None:
inds = np.where(bools)[0]
params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
for ii in inds]
params['proj_bools'] = bools
epochs = params['epochs']
n_epochs = params['n_epochs']
params['projector'], params['whitened_ch_names'] = _setup_plot_projector(
params['info'], params['noise_cov'], True, params['use_noise_cov'])
start = int(params['t_start'] / len(epochs.times))
end = start + n_epochs
if epochs.preload:
data = np.concatenate(epochs.get_data()[start:end], axis=1)
else:
# this is faster than epochs.get_data()[start:end] when not preloaded
data = np.concatenate(epochs[start:end].get_data(), axis=1)
if params['projector'] is not None:
data = np.dot(params['projector'], data)
types = params['types']
for pick, ind in enumerate(params['inds']):
ch_name = params['info']['ch_names'][ind]
if ch_name in params['whitened_ch_names'] and \
ch_name not in params['info']['bads']:
norm = params['scalings']['whitened']
else:
norm = params['scalings'][types[pick]]
params['data'][pick] = data[ind] / norm
params['plot_fun']()
def _handle_picks(epochs):
"""Handle picks."""
if any('ICA' in k for k in epochs.ch_names):
picks = pick_types(epochs.info, misc=True, ref_meg=False,
exclude=[])
else:
picks = pick_types(epochs.info, meg=True, eeg=True, eog=True, ecg=True,
seeg=True, ecog=True, ref_meg=False, fnirs=True,
exclude=[])
return picks
def _plot_window(value, params):
"""Deal with horizontal shift of the viewport."""
max_times = len(params['times']) - params['duration']
    if value > max_times:
        value = max_times
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
params['plot_update_proj_callback'](params)
def _plot_vert_lines(params):
"""Plot vertical lines."""
ax = params['ax']
while len(ax.lines) > 0:
ax.lines.pop()
params['vert_lines'] = list()
params['ev_lines'] = list()
params['vertline_t'].set_text('')
epochs = params['epochs']
if params['settings'][3]: # if zeroline visible
t_zero = np.where(epochs.times == 0.)[0]
if len(t_zero) == 1: # not True if tmin > 0
for event_idx in range(len(epochs.events)):
pos = [event_idx * len(epochs.times) + t_zero[0],
event_idx * len(epochs.times) + t_zero[0]]
ax.plot(pos, ax.get_ylim(), 'g', zorder=4, alpha=0.4)
for epoch_idx in range(len(epochs.events)):
pos = [epoch_idx * len(epochs.times), epoch_idx * len(epochs.times)]
ax.plot(pos, ax.get_ylim(), color='black', linestyle='--', zorder=2)
if params['events'] is not None:
_draw_event_lines(params)
def _pick_bad_epochs(event, params):
"""Select / drop bad epochs."""
if 'ica' in params:
pos = (event.xdata, event.ydata)
_pick_bad_channels(pos, params)
return
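    # Map the click's x position to an epoch index: the visible x-range is
    # divided into ``n_epochs`` equal slots and ``start_idx`` accounts for
    # epochs scrolled out of view on the left.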
n_times = len(params['epochs'].times)
start_idx = int(params['t_start'] / n_times)
xdata = event.xdata
xlim = event.inaxes.get_xlim()
epoch_idx = start_idx + int(xdata / (xlim[1] / params['n_epochs']))
total_epochs = len(params['epochs'].events)
if epoch_idx > total_epochs - 1:
return
# remove bad epoch
if epoch_idx in params['bads']:
params['bads'] = params['bads'][(params['bads'] != epoch_idx)]
for ch_idx in range(len(params['ch_names'])):
params['colors'][ch_idx][epoch_idx] = params['def_colors'][ch_idx]
params['ax_hscroll'].patches[epoch_idx].set_color('w')
params['ax_hscroll'].patches[epoch_idx].set_zorder(2)
params['plot_fun']()
return
# add bad epoch
params['bads'] = np.append(params['bads'], epoch_idx)
params['ax_hscroll'].patches[epoch_idx].set_color((1., 0., 0., 1.))
params['ax_hscroll'].patches[epoch_idx].set_zorder(3)
params['ax_hscroll'].patches[epoch_idx].set_edgecolor('w')
for ch_idx in range(len(params['ch_names'])):
params['colors'][ch_idx][epoch_idx] = (1., 0., 0., 1.)
params['plot_fun']()
def _pick_bad_channels(pos, params):
"""Select bad channels."""
text, ch_idx = _label2idx(params, pos)
if text is None:
return
if text in params['info']['bads']:
while text in params['info']['bads']:
params['info']['bads'].remove(text)
color = params['def_colors'][ch_idx]
params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
else:
params['info']['bads'].append(text)
color = params['bad_color']
params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
if 'ica' in params:
params['plot_fun']()
else:
params['plot_update_proj_callback'](params)
def _plot_onscroll(event, params):
"""Handle scroll events."""
if event.key == 'control':
if event.step < 0:
event.key = '-'
else:
event.key = '+'
_plot_onkey(event, params)
return
if params['butterfly']:
return
_plot_raw_onscroll(event, params, len(params['ch_names']))
def _mouse_click(event, params):
"""Handle mouse click events."""
if event.inaxes is None:
if params['butterfly'] or not params['settings'][0]:
return
ax = params['ax']
ylim = ax.get_ylim()
pos = ax.transData.inverted().transform((event.x, event.y))
if pos[0] > 0 or pos[1] < 0 or pos[1] > ylim[0]:
return
if event.button == 1: # left click
params['label_click_fun'](pos)
elif event.button == 3: # right click
if 'ica' not in params:
_, ch_idx = _label2idx(params, pos)
if ch_idx is None:
return
if channel_type(params['info'], ch_idx) not in ['mag', 'grad',
'eeg', 'eog']:
logger.info('Event related fields / potentials only '
'available for MEG and EEG channels.')
return
fig = plot_epochs_image(params['epochs'],
picks=params['inds'][ch_idx],
fig=params['image_plot'])[0]
params['image_plot'] = fig
elif event.button == 1: # left click
# vertical scroll bar changed
if event.inaxes == params['ax_vscroll']:
if params['butterfly']:
return
# Don't let scrollbar go outside vertical scrollbar limits
# XXX: floating point exception on some machines if this happens.
ch_start = min(
max(int(event.ydata) - params['n_channels'] // 2, 0),
len(params['ch_names']) - params['n_channels'])
if params['ch_start'] != ch_start:
params['ch_start'] = ch_start
params['plot_fun']()
# horizontal scroll bar changed
elif event.inaxes == params['ax_hscroll']:
# find the closest epoch time
times = params['epoch_times']
offset = 0.5 * params['n_epochs'] * len(params['epochs'].times)
xdata = times.flat[np.abs(times - (event.xdata - offset)).argmin()]
_plot_window(xdata, params)
# main axes
elif event.inaxes == params['ax']:
_pick_bad_epochs(event, params)
elif event.inaxes == params['ax'] and event.button == 2: # middle click
params['fig'].canvas.draw()
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
elif event.inaxes == params['ax'] and event.button == 3: # right click
n_times = len(params['epochs'].times)
xdata = int(event.xdata % n_times)
prev_xdata = 0
if len(params['vert_lines']) > 0:
prev_xdata = params['vert_lines'][0][0].get_data()[0][0]
while len(params['vert_lines']) > 0:
params['ax'].lines.remove(params['vert_lines'][0][0])
params['vert_lines'].pop(0)
if prev_xdata == xdata: # lines removed
params['vertline_t'].set_text('')
params['plot_fun']()
return
ylim = params['ax'].get_ylim()
for epoch_idx in range(params['n_epochs']): # plot lines
pos = [epoch_idx * n_times + xdata, epoch_idx * n_times + xdata]
params['vert_lines'].append(params['ax'].plot(pos, ylim, 'y',
zorder=5))
params['vertline_t'].set_text('%0.3f' % params['epochs'].times[xdata])
params['plot_fun']()
def _plot_onkey(event, params):
"""Handle key presses."""
import matplotlib.pyplot as plt
if event.key == 'down':
if params['butterfly']:
return
params['ch_start'] += params['n_channels']
_channels_changed(params, len(params['ch_names']))
elif event.key == 'up':
if params['butterfly']:
return
params['ch_start'] -= params['n_channels']
_channels_changed(params, len(params['ch_names']))
elif event.key == 'left':
sample = params['t_start'] - params['duration']
sample = np.max([0, sample])
_plot_window(sample, params)
elif event.key == 'right':
sample = params['t_start'] + params['duration']
sample = np.min([sample, params['times'][-1] - params['duration']])
times = params['epoch_times']
xdata = times.flat[np.abs(times - sample).argmin()]
_plot_window(xdata, params)
elif event.key == '-':
if params['butterfly']:
params['butterfly_scale'] /= 1.1
else:
params['scale_factor'] /= 1.1
params['plot_fun']()
elif event.key in ['+', '=']:
if params['butterfly']:
params['butterfly_scale'] *= 1.1
else:
params['scale_factor'] *= 1.1
params['plot_fun']()
elif event.key == 'f11':
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
elif event.key == 'pagedown':
if params['n_channels'] == 1 or params['butterfly']:
return
n_channels = params['n_channels'] - 1
ylim = params['ax'].get_ylim()
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
params['ax'].collections.pop()
params['ax'].set_yticks(params['offsets'])
params['lines'].pop()
params['vsel_patch'].set_height(n_channels)
params['plot_fun']()
elif event.key == 'pageup':
if params['butterfly']:
return
from matplotlib.collections import LineCollection
n_channels = params['n_channels'] + 1
ylim = params['ax'].get_ylim()
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
lc = LineCollection(list(), antialiased=True, linewidths=0.5,
zorder=3, picker=3.)
params['ax'].add_collection(lc)
params['ax'].set_yticks(params['offsets'])
params['lines'].append(lc)
params['vsel_patch'].set_height(n_channels)
params['plot_fun']()
elif event.key == 'home':
n_epochs = params['n_epochs'] - 1
if n_epochs <= 0:
return
n_times = len(params['epochs'].times)
ticks = params['epoch_times'] + 0.5 * n_times
params['ax2'].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
params['duration'] -= n_times
params['hsel_patch'].set_width(params['duration'])
params['data'] = params['data'][:, :-n_times]
params['plot_update_proj_callback'](params)
elif event.key == 'end':
n_epochs = params['n_epochs'] + 1
n_times = len(params['epochs'].times)
if n_times * n_epochs > len(params['times']):
return
ticks = params['epoch_times'] + 0.5 * n_times
params['ax2'].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
if len(params['vert_lines']) > 0:
ax = params['ax']
pos = params['vert_lines'][0][0].get_data()[0] + params['duration']
params['vert_lines'].append(ax.plot(pos, ax.get_ylim(), 'y',
zorder=4))
params['duration'] += n_times
if params['t_start'] + params['duration'] > len(params['times']):
params['t_start'] -= n_times
params['hsel_patch'].set_x(params['t_start'])
params['hsel_patch'].set_width(params['duration'])
params['data'] = np.zeros((len(params['data']), params['duration']))
params['plot_update_proj_callback'](params)
elif event.key == 'b':
if params['fig_options'] is not None:
plt.close(params['fig_options'])
params['fig_options'] = None
_prepare_butterfly(params)
_plot_traces(params)
elif event.key == 'w':
params['use_noise_cov'] = not params['use_noise_cov']
_plot_update_epochs_proj(params)
_plot_traces(params)
elif event.key == 'o':
if not params['butterfly']:
_open_options(params)
elif event.key == 'h':
_plot_histogram(params)
elif event.key == '?':
_onclick_help(event, params)
elif event.key == 'escape':
plt.close(params['fig'])
def _prepare_butterfly(params):
"""Set up butterfly plot."""
from matplotlib.collections import LineCollection
butterfly = not params['butterfly']
if butterfly:
types = set(['grad', 'mag', 'eeg', 'eog',
'ecg']) & set(params['types'])
if len(types) < 1:
return
params['ax_vscroll'].set_visible(False)
ax = params['ax']
labels = ax.yaxis.get_ticklabels()
for label in labels:
label.set_visible(True)
ylim = (5. * len(types), 0.)
ax.set_ylim(ylim)
offset = ylim[0] / (4. * len(types))
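        # each channel type occupies an equal vertical band (four tick
        # intervals); the type's traces are offset to a tick near the
        # center of its band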
ticks = np.arange(0, ylim[0], offset)
ticks = [ticks[x] if x < len(ticks) else 0 for x in range(20)]
ax.set_yticks(ticks)
used_types = 0
params['offsets'] = [ticks[2]]
if 'grad' in types:
pos = (0, 1 - (ticks[2] / ylim[0]))
params['ax2'].annotate('Grad (fT/cm)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'mag' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('Mag (fT)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'eeg' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('EEG (uV)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'eog' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('EOG (uV)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'ecg' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('ECG (uV)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
while len(params['lines']) < len(params['picks']):
lc = LineCollection(list(), antialiased=True, linewidths=0.5,
zorder=3, picker=3.)
ax.add_collection(lc)
params['lines'].append(lc)
else: # change back to default view
labels = params['ax'].yaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][0])
params['ax_vscroll'].set_visible(True)
while len(params['ax2'].texts) > 0:
params['ax2'].texts.pop()
n_channels = params['n_channels']
while len(params['lines']) > n_channels:
params['ax'].collections.pop()
params['lines'].pop()
ylim = (25., 0.)
params['ax'].set_ylim(ylim)
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['ax'].set_yticks(params['offsets'])
params['butterfly'] = butterfly
def _onpick(event, params):
"""Add a channel name on click."""
if event.mouseevent.button != 2 or not params['butterfly']:
return # text label added with a middle mouse button
lidx = np.where([l is event.artist for l in params['lines']])[0][0]
text = params['text']
text.set_x(event.mouseevent.xdata)
text.set_y(event.mouseevent.ydata)
text.set_text(params['ch_names'][lidx])
text.set_visible(True)
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use _mouse_click (happens once per click)
# to do the drawing
def _close_event(event, params):
"""Drop selected bad epochs (called on closing of the plot)."""
params['epochs'].drop(params['bads'])
params['epochs'].info['bads'] = params['info']['bads']
logger.info('Channels marked as bad: %s' % params['epochs'].info['bads'])
def _resize_event(event, params):
"""Handle resize event."""
size = ','.join([str(s) for s in params['fig'].get_size_inches()])
set_config('MNE_BROWSE_RAW_SIZE', size, set_env=False)
_layout_figure(params)
def _update_channels_epochs(event, params):
"""Change the amount of channels and epochs per view."""
from matplotlib.collections import LineCollection
# Channels
n_channels = int(np.around(params['channel_slider'].val))
offset = params['ax'].get_ylim()[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
while len(params['lines']) > n_channels:
params['ax'].collections.pop()
params['lines'].pop()
while len(params['lines']) < n_channels:
lc = LineCollection(list(), linewidths=0.5, antialiased=True,
zorder=3, picker=3.)
params['ax'].add_collection(lc)
params['lines'].append(lc)
params['ax'].set_yticks(params['offsets'])
params['vsel_patch'].set_height(n_channels)
params['n_channels'] = n_channels
# Epochs
n_epochs = int(np.around(params['epoch_slider'].val))
n_times = len(params['epochs'].times)
ticks = params['epoch_times'] + 0.5 * n_times
params['ax2'].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
params['duration'] = n_times * n_epochs
params['hsel_patch'].set_width(params['duration'])
params['data'] = np.zeros((len(params['data']), params['duration']))
if params['t_start'] + n_times * n_epochs > len(params['times']):
params['t_start'] = len(params['times']) - n_times * n_epochs
params['hsel_patch'].set_x(params['t_start'])
params['plot_update_proj_callback'](params)
def _toggle_labels(label, params):
"""Toggle axis labels."""
if label == 'Channel names visible':
params['settings'][0] = not params['settings'][0]
labels = params['ax'].yaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][0])
elif label == 'Event-id visible':
params['settings'][1] = not params['settings'][1]
labels = params['ax2'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][1])
elif label == 'Epoch-id visible':
params['settings'][2] = not params['settings'][2]
labels = params['ax'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][2])
elif label == 'Zeroline visible':
params['settings'][3] = not params['settings'][3]
_plot_vert_lines(params)
params['fig'].canvas.draw()
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _open_options(params):
"""Open the option window."""
import matplotlib.pyplot as plt
import matplotlib as mpl
if params['fig_options'] is not None:
# turn off options dialog
plt.close(params['fig_options'])
params['fig_options'] = None
return
width = 10
height = 3
fig_options = figure_nobar(figsize=(width, height), dpi=80)
fig_options.canvas.set_window_title('View settings')
params['fig_options'] = fig_options
ax_channels = plt.axes([0.15, 0.1, 0.65, 0.1])
ax_epochs = plt.axes([0.15, 0.25, 0.65, 0.1])
ax_button = plt.axes([0.85, 0.1, 0.1, 0.25])
ax_check = plt.axes([0.15, 0.4, 0.4, 0.55])
plt.axis('off')
params['update_button'] = mpl.widgets.Button(ax_button, 'Update')
params['channel_slider'] = mpl.widgets.Slider(ax_channels, 'Channels', 1,
len(params['ch_names']),
valfmt='%0.0f',
valinit=params['n_channels'])
params['epoch_slider'] = mpl.widgets.Slider(ax_epochs, 'Epochs', 1,
len(params['epoch_times']),
valfmt='%0.0f',
valinit=params['n_epochs'])
params['checkbox'] = mpl.widgets.CheckButtons(ax_check,
['Channel names visible',
'Event-id visible',
'Epoch-id visible',
'Zeroline visible'],
actives=params['settings'])
update = partial(_update_channels_epochs, params=params)
params['update_button'].on_clicked(update)
labels_callback = partial(_toggle_labels, params=params)
params['checkbox'].on_clicked(labels_callback)
close_callback = partial(_settings_closed, params=params)
params['fig_options'].canvas.mpl_connect('close_event', close_callback)
try:
params['fig_options'].canvas.draw()
params['fig_options'].show(warn=False)
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
except Exception:
pass
def _settings_closed(events, params):
"""Handle close event from settings dialog."""
params['fig_options'] = None
def _plot_histogram(params):
"""Plot histogram of peak-to-peak values."""
import matplotlib.pyplot as plt
epochs = params['epochs']
p2p = np.ptp(epochs.get_data(), axis=2)
types = list()
data = list()
if 'eeg' in params['types']:
eegs = np.array([p2p.T[i] for i,
x in enumerate(params['types']) if x == 'eeg'])
data.append(eegs.ravel())
types.append('eeg')
if 'mag' in params['types']:
mags = np.array([p2p.T[i] for i,
x in enumerate(params['types']) if x == 'mag'])
data.append(mags.ravel())
types.append('mag')
if 'grad' in params['types']:
grads = np.array([p2p.T[i] for i,
x in enumerate(params['types']) if x == 'grad'])
data.append(grads.ravel())
types.append('grad')
params['histogram'] = plt.figure()
scalings = _handle_default('scalings')
units = _handle_default('units')
titles = _handle_default('titles')
colors = _handle_default('color')
for idx in range(len(types)):
ax = plt.subplot(len(types), 1, idx + 1)
plt.xlabel(units[types[idx]])
plt.ylabel('Count')
color = colors[types[idx]]
rej = None
if epochs.reject is not None and types[idx] in epochs.reject.keys():
rej = epochs.reject[types[idx]] * scalings[types[idx]]
rng = [0., rej * 1.1]
else:
rng = None
plt.hist(data[idx] * scalings[types[idx]], bins=100, color=color,
range=rng)
if rej is not None:
ax.plot((rej, rej), (0, ax.get_ylim()[1]), color='r')
plt.title(titles[types[idx]])
params['histogram'].suptitle('Peak-to-peak histogram', y=0.99)
params['histogram'].subplots_adjust(hspace=0.6)
try:
params['histogram'].show(warn=False)
except Exception:
pass
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
plt.tight_layout(h_pad=0.7, pad=2)
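# Editor's illustrative sketch (not part of the original module): shows the
# np.ptp reduction that _plot_histogram builds on. Reducing over the time
# axis turns a hypothetical (n_epochs, n_channels, n_times) array into
# per-epoch, per-channel peak-to-peak amplitudes, which the histogram bins.
def _ptp_example():
    data = np.random.RandomState(0).randn(5, 3, 100)  # hypothetical epochs
    p2p = np.ptp(data, axis=2)  # max - min along the time axis
    assert p2p.shape == (5, 3)
    return p2p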
def _label2idx(params, pos):
"""Handle click on labels (returns channel name and idx)."""
labels = params['ax'].yaxis.get_ticklabels()
offsets = np.array(params['offsets']) + params['offsets'][0]
line_idx = np.searchsorted(offsets, pos[1])
text = labels[line_idx].get_text()
if len(text) == 0:
return None, None
ch_idx = params['ch_start'] + line_idx
return text, ch_idx
def _draw_event_lines(params):
"""Draw event lines."""
epochs = params['epochs']
n_times = len(epochs.times)
start_idx = int(params['t_start'] / n_times)
color = params['event_colors']
ax = params['ax']
for ev_line in params['ev_lines']:
ax.lines.remove(ev_line) # clear the view first
for ev_text in params['ev_texts']:
ax.texts.remove(ev_text)
params['ev_texts'] = list()
params['ev_lines'] = list()
t_zero = np.where(epochs.times == 0.)[0] # idx of 0s
if len(t_zero) == 0:
t_zero = epochs.times[0] * -1 * epochs.info['sfreq'] # if tmin > 0
end = params['n_epochs'] + start_idx
samp_times = params['events'][:, 0]
for idx, event in enumerate(epochs.events[start_idx:end]):
event_mask = ((event[0] - t_zero < samp_times) &
(samp_times < event[0] + n_times - t_zero))
for ev in params['events'][event_mask]:
if ev[0] == event[0]: # don't redraw the zeroline
continue
pos = [idx * n_times + ev[0] - event[0] + t_zero,
idx * n_times + ev[0] - event[0] + t_zero]
kwargs = {} if ev[2] not in color else {'color': color[ev[2]]}
params['ev_lines'].append(ax.plot(pos, ax.get_ylim(),
zorder=3, **kwargs)[0])
params['ev_texts'].append(ax.text(pos[0], ax.get_ylim()[0],
ev[2], color=color[ev[2]],
ha='center', va='top'))
|
teonlamont/mne-python
|
mne/viz/epochs.py
|
Python
|
bsd-3-clause
| 85,327
|
[
"Gaussian"
] |
0a1127369dc7e2364cad275cbe1b3ede14916257451acaf7b9dfff939be5f748
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# imports
##############################################################################
from __future__ import print_function, division
import warnings
import numbers
import numpy as np
import collections.abc
from mdtraj.utils.six.moves import zip_longest
##############################################################################
# functions / classes
##############################################################################
class TypeCastPerformanceWarning(RuntimeWarning):
pass
def ensure_type(val, dtype, ndim, name, length=None, can_be_none=False, shape=None,
warn_on_cast=True, add_newaxis_on_deficient_ndim=False):
"""Typecheck the size, shape and dtype of a numpy array, with optional
casting.
Parameters
----------
    val : {np.ndarray, None}
        The array to check
    dtype : {np.dtype, str}
        The dtype you'd like the array to have
ndim : int
The number of dimensions you'd like the array to have
name : str
name of the array. This is used when throwing exceptions, so that
we can describe to the user which array is messed up.
length : int, optional
How long should the array be?
can_be_none : bool
Is ``val == None`` acceptable?
shape : tuple, optional
What should be shape of the array be? If the provided tuple has
Nones in it, those will be semantically interpreted as matching
any length in that dimension. So, for example, using the shape
spec ``(None, None, 3)`` will ensure that the last dimension is of
length three without constraining the first two dimensions
warn_on_cast : bool, default=True
Raise a warning when the dtypes don't match and a cast is done.
    add_newaxis_on_deficient_ndim : bool, default=False
        Add a new axis to the beginning of the array if the number of
dimensions is deficient by one compared to your specification. For
instance, if you're trying to get out an array of ``ndim == 3``,
but the user provides an array of ``shape == (10, 10)``, a new axis will
be created with length 1 in front, so that the return value is of
shape ``(1, 10, 10)``.
Notes
-----
The returned value will always be C-contiguous.
Returns
-------
typechecked_val : np.ndarray, None
If `val=None` and `can_be_none=True`, then this will return None.
        Otherwise, it will return val (or a copy of val). If the dtype wasn't right,
        it'll be cast to the right dtype. If the array was not C-contiguous, it'll
be copied as well.
"""
if can_be_none and val is None:
return None
if not isinstance(val, np.ndarray):
if isinstance(val, collections.abc.Iterable):
# If they give us an iterator, let's try...
if isinstance(val, collections.abc.Sequence):
# sequences are easy. these are like lists and stuff
val = np.array(val, dtype=dtype)
else:
# this is a generator...
val = np.array(list(val), dtype=dtype)
elif np.isscalar(val) and add_newaxis_on_deficient_ndim and ndim == 1:
# special case: if the user is looking for a 1d array, and
# they request newaxis upconversion, and provided a scalar
# then we should reshape the scalar to be a 1d length-1 array
val = np.array([val])
else:
raise TypeError(("%s must be numpy array. "
" You supplied type %s" % (name, type(val))))
if warn_on_cast and val.dtype != dtype:
warnings.warn("Casting %s dtype=%s to %s " % (name, val.dtype, dtype),
TypeCastPerformanceWarning)
if not val.ndim == ndim:
if add_newaxis_on_deficient_ndim and val.ndim + 1 == ndim:
val = val[np.newaxis, ...]
else:
raise ValueError(("%s must be ndim %s. "
"You supplied %s" % (name, ndim, val.ndim)))
val = np.ascontiguousarray(val, dtype=dtype)
if length is not None and len(val) != length:
raise ValueError(("%s must be length %s. "
"You supplied %s" % (name, length, len(val))))
if shape is not None:
        # the shape spec given by the user can look like (None, None, 3)
# which indicates that ANY length is accepted in dimension 0 or
# dimension 1
        sentinel = object()
        error = ValueError(("%s must be shape %s. You supplied "
                            "%s" % (name, str(shape).replace('None', 'Any'), val.shape)))
        for a, b in zip_longest(val.shape, shape, fillvalue=sentinel):
            if a is sentinel or b is sentinel:
                # if the sentinel was reached, it means that the ndim didn't
                # match or something. this really shouldn't happen
                raise error
if b is None:
# if the user's shape spec has a None in it, it matches anything
continue
if a != b:
# check for equality
raise error
return val
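# Editor's illustrative sketch (not in the original mdtraj source): a hedged
# example of typical ensure_type usage on a hypothetical single coordinate
# frame, relying on the module-level `import numpy as np`.
def _ensure_type_example():
    frame = np.zeros((10, 3), dtype=np.float64)  # one frame of 10 atoms
    xyz = ensure_type(frame, dtype=np.float32, ndim=3, name='xyz',
                      shape=(None, None, 3),
                      add_newaxis_on_deficient_ndim=True)
    # the missing frame axis was prepended and the dtype was cast
    assert xyz.shape == (1, 10, 3)
    assert xyz.dtype == np.float32
    return xyz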
def cast_indices(indices):
"""Check that ``indices`` are appropriate for indexing an array
Parameters
----------
indices : {None, array_like, slice}
If indices is None or slice, it'll just pass through. Otherwise, it'll
be converted to a numpy array and checked to make sure it contains
unique integers.
Returns
-------
value : {slice, np.ndarray}
Either a slice or an array of integers, depending on the input type
"""
if indices is None or isinstance(indices, slice):
return indices
if not len(indices) == len(set(indices)):
raise ValueError("indices must be unique.")
out = np.asarray(indices)
if not issubclass(out.dtype.type, np.integer):
raise ValueError('indices must be of an integer type. %s is not an integer type' % out.dtype)
return out
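# Editor's illustrative sketch (not in the original mdtraj source): None and
# slices pass through cast_indices unchanged, while sequences are validated
# as unique integers and returned as an ndarray.
def _cast_indices_example():
    assert cast_indices(None) is None
    sl = slice(0, 10, 2)
    assert cast_indices(sl) is sl
    idx = cast_indices([0, 2, 5])
    assert isinstance(idx, np.ndarray) and idx.dtype.kind == 'i'
    return idx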
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Parameters
----------
seed : {None, int, RandomState}
Seed for a random number generator
Returns
-------
randomstate : RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
    # This code is directly from the scikit-learn project (sklearn/utils/validation.py)
# Authors: Olivier Grisel and Gael Varoquaux and others (please update me)
# License: BSD 3 clause
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
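# Editor's illustrative sketch (not in the original mdtraj source): the three
# accepted seed forms all resolve to a usable RandomState.
def _check_random_state_example():
    rng_global = check_random_state(None)        # np.random's global state
    rng_seeded = check_random_state(42)          # fresh seeded RandomState
    assert check_random_state(rng_seeded) is rng_seeded  # passed through
    return rng_global, rng_seeded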
|
dwhswenson/mdtraj
|
mdtraj/utils/validation.py
|
Python
|
lgpl-2.1
| 8,147
|
[
"MDTraj"
] |
2c00a77111bf2f1970c6fc1d503b01a84621a663d816fcddd0ad2bf5ea49e63d
|
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
'''
Blocks and utilities for digital modulation and demodulation.
'''
from __future__ import absolute_import
from __future__ import unicode_literals
# The presence of this file turns this directory into a Python package
import os
try:
from .digital_python import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "bindings"))
from .digital_python import *
from gnuradio import analog # just need analog for the enum
class gmskmod_bc(cpmmod_bc):
def __init__(self, samples_per_sym = 2, L = 4, beta = 0.3):
cpmmod_bc.__init__(self, analog.cpm.GAUSSIAN, 0.5, samples_per_sym, L, beta)
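# Editor's illustrative sketch (not in the original GNU Radio source):
# gmskmod_bc above simply pins the CPM parameters that define GMSK -- a
# Gaussian phase pulse with modulation index h = 0.5; beta is the BT product
# (0.3 matches the GSM profile). A minimal instantiation, assuming a
# surrounding flowgraph supplies the byte stream:
def _gmsk_example():
    mod = gmskmod_bc(samples_per_sym=4, L=4, beta=0.3)
    return mod  # connect into a flowgraph, e.g. tb.connect(src, mod, sink)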
from .psk import *
from .qam import *
from .qamlike import *
from .bpsk import *
from .qpsk import *
from .gmsk import *
from .gfsk import *
from .cpm import *
from .crc import *
from .modulation_utils import *
from .ofdm_txrx import ofdm_tx, ofdm_rx
from .soft_dec_lut_gen import *
from .psk_constellations import *
from .qam_constellations import *
from .constellation_map_generator import *
from . import packet_utils
|
jdemel/gnuradio
|
gr-digital/python/digital/__init__.py
|
Python
|
gpl-3.0
| 1,248
|
[
"Gaussian"
] |
78b948cd51b41fc0ae863acf0b75fbf6e719329a05af1825248087c82351eb70
|
# -*- coding: utf-8 -*-
"""
celery.datastructures
~~~~~~~~~~~~~~~~~~~~~
Custom types and data structures.
"""
from __future__ import absolute_import, print_function, unicode_literals
import sys
import time
from collections import defaultdict, Mapping, MutableMapping, MutableSet
from heapq import heappush, heappop
from functools import partial
from itertools import chain
from billiard.einfo import ExceptionInfo # noqa
from kombu.utils.encoding import safe_str
from kombu.utils.limits import TokenBucket # noqa
from celery.five import items
from celery.utils.functional import LRUCache, first, uniq # noqa
try:
from django.utils.functional import LazyObject, LazySettings
except ImportError:
class LazyObject(object): # noqa
pass
LazySettings = LazyObject # noqa
DOT_HEAD = """
{IN}{type} {id} {{
{INp}graph [{attrs}]
"""
DOT_ATTR = '{name}={value}'
DOT_NODE = '{INp}"{0}" [{attrs}]'
DOT_EDGE = '{INp}"{0}" {dir} "{1}" [{attrs}]'
DOT_ATTRSEP = ', '
DOT_DIRS = {'graph': '--', 'digraph': '->'}
DOT_TAIL = '{IN}}}'
__all__ = ['GraphFormatter', 'CycleError', 'DependencyGraph',
'AttributeDictMixin', 'AttributeDict', 'DictAttribute',
'ConfigurationView', 'LimitedSet']
def force_mapping(m):
if isinstance(m, (LazyObject, LazySettings)):
m = m._wrapped
return DictAttribute(m) if not isinstance(m, Mapping) else m
class GraphFormatter(object):
_attr = DOT_ATTR.strip()
_node = DOT_NODE.strip()
_edge = DOT_EDGE.strip()
_head = DOT_HEAD.strip()
_tail = DOT_TAIL.strip()
_attrsep = DOT_ATTRSEP
_dirs = dict(DOT_DIRS)
scheme = {
'shape': 'box',
'arrowhead': 'vee',
'style': 'filled',
'fontname': 'HelveticaNeue',
}
edge_scheme = {
'color': 'darkseagreen4',
'arrowcolor': 'black',
'arrowsize': 0.7,
}
node_scheme = {'fillcolor': 'palegreen3', 'color': 'palegreen4'}
term_scheme = {'fillcolor': 'palegreen1', 'color': 'palegreen2'}
graph_scheme = {'bgcolor': 'mintcream'}
def __init__(self, root=None, type=None, id=None,
indent=0, inw=' ' * 4, **scheme):
self.id = id or 'dependencies'
self.root = root
self.type = type or 'digraph'
self.direction = self._dirs[self.type]
self.IN = inw * (indent or 0)
self.INp = self.IN + inw
self.scheme = dict(self.scheme, **scheme)
self.graph_scheme = dict(self.graph_scheme, root=self.label(self.root))
def attr(self, name, value):
value = '"{0}"'.format(value)
return self.FMT(self._attr, name=name, value=value)
def attrs(self, d, scheme=None):
d = dict(self.scheme, **dict(scheme, **d or {}) if scheme else d)
return self._attrsep.join(
safe_str(self.attr(k, v)) for k, v in items(d)
)
def head(self, **attrs):
return self.FMT(
self._head, id=self.id, type=self.type,
attrs=self.attrs(attrs, self.graph_scheme),
)
def tail(self):
return self.FMT(self._tail)
def label(self, obj):
return obj
def node(self, obj, **attrs):
return self.draw_node(obj, self.node_scheme, attrs)
def terminal_node(self, obj, **attrs):
return self.draw_node(obj, self.term_scheme, attrs)
def edge(self, a, b, **attrs):
return self.draw_edge(a, b, **attrs)
def _enc(self, s):
return s.encode('utf-8', 'ignore')
def FMT(self, fmt, *args, **kwargs):
return self._enc(fmt.format(
*args, **dict(kwargs, IN=self.IN, INp=self.INp)
))
def draw_edge(self, a, b, scheme=None, attrs=None):
return self.FMT(
self._edge, self.label(a), self.label(b),
dir=self.direction, attrs=self.attrs(attrs, self.edge_scheme),
)
def draw_node(self, obj, scheme=None, attrs=None):
return self.FMT(
self._node, self.label(obj), attrs=self.attrs(attrs, scheme),
)
class CycleError(Exception):
"""A cycle was detected in an acyclic graph."""
class DependencyGraph(object):
"""A directed acyclic graph of objects and their dependencies.
Supports a robust topological sort
to detect the order in which they must be handled.
Takes an optional iterator of ``(obj, dependencies)``
tuples to build the graph from.
.. warning::
Does not support cycle detection.
"""
def __init__(self, it=None, formatter=None):
self.formatter = formatter or GraphFormatter()
self.adjacent = {}
if it is not None:
self.update(it)
def add_arc(self, obj):
"""Add an object to the graph."""
self.adjacent.setdefault(obj, [])
def add_edge(self, A, B):
"""Add an edge from object ``A`` to object ``B``
(``A`` depends on ``B``)."""
self[A].append(B)
def connect(self, graph):
"""Add nodes from another graph."""
self.adjacent.update(graph.adjacent)
def topsort(self):
"""Sort the graph topologically.
:returns: a list of objects in the order
in which they must be handled.
"""
graph = DependencyGraph()
components = self._tarjan72()
NC = dict((node, component)
for component in components
for node in component)
for component in components:
graph.add_arc(component)
for node in self:
node_c = NC[node]
for successor in self[node]:
successor_c = NC[successor]
if node_c != successor_c:
graph.add_edge(node_c, successor_c)
return [t[0] for t in graph._khan62()]
def valency_of(self, obj):
"""Return the valency (degree) of a vertex in the graph."""
try:
l = [len(self[obj])]
except KeyError:
return 0
for node in self[obj]:
l.append(self.valency_of(node))
return sum(l)
def update(self, it):
"""Update the graph with data from a list
of ``(obj, dependencies)`` tuples."""
tups = list(it)
for obj, _ in tups:
self.add_arc(obj)
for obj, deps in tups:
for dep in deps:
self.add_edge(obj, dep)
def edges(self):
"""Return generator that yields for all edges in the graph."""
return (obj for obj, adj in items(self) if adj)
def _khan62(self):
"""Khans simple topological sort algorithm from '62
See http://en.wikipedia.org/wiki/Topological_sorting
"""
count = defaultdict(lambda: 0)
result = []
for node in self:
for successor in self[node]:
count[successor] += 1
ready = [node for node in self if not count[node]]
while ready:
node = ready.pop()
result.append(node)
for successor in self[node]:
count[successor] -= 1
if count[successor] == 0:
ready.append(successor)
result.reverse()
return result
def _tarjan72(self):
"""Tarjan's algorithm to find strongly connected components.
See http://bit.ly/vIMv3h.
"""
result, stack, low = [], [], {}
def visit(node):
if node in low:
return
num = len(low)
low[node] = num
stack_pos = len(stack)
stack.append(node)
for successor in self[node]:
visit(successor)
low[node] = min(low[node], low[successor])
if num == low[node]:
component = tuple(stack[stack_pos:])
stack[stack_pos:] = []
result.append(component)
for item in component:
low[item] = len(self)
for node in self:
visit(node)
return result
def to_dot(self, fh, formatter=None):
"""Convert the graph to DOT format.
:param fh: A file, or a file-like object to write the graph to.
"""
seen = set()
draw = formatter or self.formatter
P = partial(print, file=fh)
def if_not_seen(fun, obj):
if draw.label(obj) not in seen:
P(fun(obj))
seen.add(draw.label(obj))
P(draw.head())
for obj, adjacent in items(self):
if not adjacent:
if_not_seen(draw.terminal_node, obj)
for req in adjacent:
if_not_seen(draw.node, obj)
P(draw.edge(obj, req))
P(draw.tail())
def format(self, obj):
return self.formatter(obj) if self.formatter else obj
def __iter__(self):
return iter(self.adjacent)
def __getitem__(self, node):
return self.adjacent[node]
def __len__(self):
return len(self.adjacent)
def __contains__(self, obj):
return obj in self.adjacent
def _iterate_items(self):
return items(self.adjacent)
items = iteritems = _iterate_items
def __repr__(self):
return '\n'.join(self.repr_node(N) for N in self)
def repr_node(self, obj, level=1, fmt='{0}({1})'):
output = [fmt.format(obj, self.valency_of(obj))]
if obj in self:
for other in self[obj]:
d = fmt.format(other, self.valency_of(other))
output.append(' ' * level + d)
output.extend(self.repr_node(other, level + 1).split('\n')[1:])
return '\n'.join(output)
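# Editor's illustrative sketch (not in the original celery source): build a
# small graph where 'app' depends on 'db' and 'cache' and 'db' depends on
# 'disk', then sort it topologically. topsort() returns one representative
# node per strongly connected component, with dependencies appearing before
# their dependents (e.g. 'disk' precedes 'db', which precedes 'app').
def _dependency_graph_example():
    graph = DependencyGraph([
        ('app', ['db', 'cache']),
        ('db', ['disk']),
        ('cache', []),
        ('disk', []),
    ])
    return graph.topsort()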
class AttributeDictMixin(object):
"""Augment classes with a Mapping interface by adding attribute access.
I.e. `d.key -> d[key]`.
"""
def __getattr__(self, k):
"""`d.key -> d[key]`"""
try:
return self[k]
except KeyError:
raise AttributeError(
'{0!r} object has no attribute {1!r}'.format(
type(self).__name__, k))
def __setattr__(self, key, value):
"""`d[key] = value -> d.key = value`"""
self[key] = value
class AttributeDict(dict, AttributeDictMixin):
"""Dict subclass with attribute access."""
pass
class DictAttribute(object):
"""Dict interface to attributes.
`obj[k] -> obj.k`
`obj[k] = val -> obj.k = val`
"""
obj = None
def __init__(self, obj):
object.__setattr__(self, 'obj', obj)
def __getattr__(self, key):
return getattr(self.obj, key)
def __setattr__(self, key, value):
return setattr(self.obj, key, value)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default):
try:
return self[key]
except KeyError:
self[key] = default
return default
def __getitem__(self, key):
try:
return getattr(self.obj, key)
except AttributeError:
raise KeyError(key)
def __setitem__(self, key, value):
setattr(self.obj, key, value)
def __contains__(self, key):
return hasattr(self.obj, key)
def _iterate_keys(self):
return iter(dir(self.obj))
iterkeys = _iterate_keys
def __iter__(self):
return self._iterate_keys()
def _iterate_items(self):
for key in self._iterate_keys():
yield key, getattr(self.obj, key)
iteritems = _iterate_items
def _iterate_values(self):
for key in self._iterate_keys():
yield getattr(self.obj, key)
itervalues = _iterate_values
if sys.version_info[0] == 3: # pragma: no cover
items = _iterate_items
keys = _iterate_keys
values = _iterate_values
else:
def keys(self):
return list(self)
def items(self):
return list(self._iterate_items())
def values(self):
return list(self._iterate_values())
MutableMapping.register(DictAttribute)
class ConfigurationView(AttributeDictMixin):
"""A view over an applications configuration dicts.
Custom (but older) version of :class:`collections.ChainMap`.
If the key does not exist in ``changes``, the ``defaults`` dicts
are consulted.
:param changes: Dict containing changes to the configuration.
:param defaults: List of dicts containing the default configuration.
"""
changes = None
defaults = None
_order = None
def __init__(self, changes, defaults):
self.__dict__.update(changes=changes, defaults=defaults,
_order=[changes] + defaults)
def add_defaults(self, d):
d = force_mapping(d)
self.defaults.insert(0, d)
self._order.insert(1, d)
def __getitem__(self, key):
for d in self._order:
try:
return d[key]
except KeyError:
pass
raise KeyError(key)
def __setitem__(self, key, value):
self.changes[key] = value
def first(self, *keys):
return first(None, (self.get(key) for key in keys))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def clear(self):
"""Remove all changes, but keep defaults."""
self.changes.clear()
def setdefault(self, key, default):
try:
return self[key]
except KeyError:
self[key] = default
return default
def update(self, *args, **kwargs):
return self.changes.update(*args, **kwargs)
def __contains__(self, key):
return any(key in m for m in self._order)
def __bool__(self):
return any(self._order)
__nonzero__ = __bool__ # Py2
def __repr__(self):
return repr(dict(items(self)))
def __iter__(self):
return self._iterate_keys()
def __len__(self):
# The logic for iterating keys includes uniq(),
# so to be safe we count by explicitly iterating
return len(set().union(*self._order))
def _iter(self, op):
# defaults must be first in the stream, so values in
# changes takes precedence.
return chain(*[op(d) for d in reversed(self._order)])
def _iterate_keys(self):
return uniq(self._iter(lambda d: d))
iterkeys = _iterate_keys
def _iterate_items(self):
return ((key, self[key]) for key in self)
iteritems = _iterate_items
def _iterate_values(self):
return (self[key] for key in self)
itervalues = _iterate_values
if sys.version_info[0] == 3: # pragma: no cover
keys = _iterate_keys
items = _iterate_items
values = _iterate_values
else: # noqa
def keys(self):
return list(self._iterate_keys())
def items(self):
return list(self._iterate_items())
def values(self):
return list(self._iterate_values())
MutableMapping.register(ConfigurationView)
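# Editor's illustrative sketch (not in the original celery source): user
# changes shadow the defaults, writes land in `changes`, and lookups fall
# back through the default dicts in order.
def _configuration_view_example():
    defaults = [{'timezone': 'UTC', 'concurrency': 4}]
    changes = {'concurrency': 16}
    conf = ConfigurationView(changes, defaults)
    assert conf['concurrency'] == 16   # change shadows the default
    assert conf['timezone'] == 'UTC'   # falls back to defaults
    conf['broker_url'] = 'amqp://'     # writes always go into `changes`
    assert changes['broker_url'] == 'amqp://'
    return conf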
class LimitedSet(object):
"""Kind-of Set with limitations.
Good for when you need to test for membership (`a in set`),
but the list might become to big.
:keyword maxlen: Maximum number of members before we start
evicting expired members.
:keyword expires: Time in seconds, before a membership expires.
"""
def __init__(self, maxlen=None, expires=None, data=None, heap=None):
self.maxlen = maxlen
self.expires = expires
self._data = {} if data is None else data
self._heap = [] if heap is None else heap
# make shortcuts
self.__len__ = self._heap.__len__
self.__iter__ = self._heap.__iter__
self.__contains__ = self._data.__contains__
def add(self, value, now=time.time):
"""Add a new member."""
# offset is there to modify the length of the list,
# this way we can expire an item before inserting the value,
# and it will end up in correct order.
self.purge(1, offset=1)
inserted = now()
self._data[value] = inserted
heappush(self._heap, (inserted, value))
def clear(self):
"""Remove all members"""
self._data.clear()
self._heap[:] = []
def discard(self, value):
"""Remove membership by finding value."""
try:
itime = self._data[value]
except KeyError:
return
        try:
            # heap entries are (insertion_time, value) tuples -- see add()
            self._heap.remove((itime, value))
        except ValueError:
            pass
self._data.pop(value, None)
pop_value = discard # XXX compat
def purge(self, limit=None, offset=0, now=time.time):
"""Purge expired items."""
H, maxlen = self._heap, self.maxlen
if not maxlen:
return
# If the data/heap gets corrupted and limit is None
# this will go into an infinite loop, so limit must
# have a value to guard the loop.
limit = len(self) + offset if limit is None else limit
i = 0
while len(self) + offset > maxlen:
if i >= limit:
break
try:
item = heappop(H)
except IndexError:
break
if self.expires:
if now() < item[0] + self.expires:
heappush(H, item)
break
try:
self._data.pop(item[1])
except KeyError: # out of sync with heap
pass
i += 1
def update(self, other, heappush=heappush):
if isinstance(other, LimitedSet):
self._data.update(other._data)
self._heap.extend(other._heap)
self._heap.sort()
else:
for obj in other:
self.add(obj)
def as_dict(self):
return self._data
def __eq__(self, other):
return self._heap == other._heap
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'LimitedSet({0})'.format(len(self))
def __iter__(self):
return (item[1] for item in self._heap)
def __len__(self):
return len(self._heap)
def __contains__(self, key):
return key in self._data
def __reduce__(self):
return self.__class__, (
self.maxlen, self.expires, self._data, self._heap,
)
MutableSet.register(LimitedSet)
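# Editor's illustrative sketch (not in the original celery source): a
# LimitedSet capped at two members evicts its oldest entry on overflow.
def _limited_set_example():
    s = LimitedSet(maxlen=2)
    s.add('a')
    s.add('b')
    s.add('c')                # 'a' is purged before 'c' is inserted
    assert 'b' in s and 'c' in s and 'a' not in s
    assert len(s) == 2
    return s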
|
hubert667/AIR
|
build/celery/celery/datastructures.py
|
Python
|
gpl-3.0
| 18,598
|
[
"VisIt"
] |
83ba955c97537b7226685bf21d6548488ccc594b221121cd07ee65834a2462e1
|
from go_somewhere_significant import *
from go_to_adjacent_systems import *
import Briefing
import Director
import VS
import debug
import faction_ships
import launch
import quest
import unit
import universe
import vsrandom
class escort_local (Director.Mission):
def __init__(self, factionname, numsystemsaway, enemyquantity, waves, distance_from_base, creds, incoming, protectivefactionname='', jumps=(), var_to_set='', dynamic_attack_fg='', dynamic_type='', dynamic_defend_fg='', dynamic_defend_type='', greetingText=['Escort: give up while you still can...', 'If you let us ravage our target then we grant you passage today.']):
Director.Mission.__init__(self)
self.greetingText = greetingText
self.dedicatedattack = vsrandom.randrange(0, 2)*vsrandom.randrange(0, 2)
if (VS.GetRelation(factionname, protectivefactionname) >= 0.0):
self.dedicatedattack = 1
self.arrived = 0
self.launchedfriend = 0
self.protectivefaction = protectivefactionname
self.var_to_set = var_to_set
self.quantity = 0
self.mplay = "all"
self.gametime = VS.GetGameTime()
self.waves = waves
self.incoming = incoming
self.dynatkfg = dynamic_attack_fg
self.dynatktype = dynamic_type
self.dyndeffg = dynamic_defend_fg
self.dyndeftype = dynamic_defend_type
self.attackers = []
self.objective = 0
self.targetiter = 0
self.ship_check_count = 0
self.faction = factionname
self.jp = VS.Unit()
self.cred = creds
self.quantity = enemyquantity
self.savedquantity = enemyquantity
self.distance_from_base = distance_from_base
self.defendee = VS.Unit()
self.difficulty = 1
self.you = VS.getPlayer()
self.respawn = 0
name = self.you.getName()
self.successdelay = 0
self.mplay = universe.getMessagePlayer(self.you)
self.adjsys = go_to_adjacent_systems(self.you, numsystemsaway, jumps)
VS.IOmessage(0, "escort mission", self.mplay,
"Good Day, %s. Your mission is as follows:" % name)
self.adjsys.Print("You are in the %s system,",
"Proceed swiftly to %s.",
"Your arrival point is %s.",
"escort mission", 1)
def SetVarValue(self, value):
if (self.var_to_set != ''):
quest.removeQuest(self.you.isPlayerStarship(), self.var_to_set, value)
def SuccessMission(self):
self.defendee.setFgDirective('b')
self.defendee.setFlightgroupLeader(self.defendee)
if (self.incoming):
un = unit.getSignificant(vsrandom.randrange(0, 20), 1, 0)
if (un.getName() == self.defendee.getName()):
un = unit.getSignificant(vsrandom.randrange(0, 30), 1, 0)
if (un.getName() == self.defendee.getName()):
un = unit.getSignificant(vsrandom.randrange(0, 40), 1, 0)
if (un.getName() == self.defendee.getName()):
un = unit.getSignificant(vsrandom.randrange(0, 30), 1, 0)
if (un.getName() == self.defendee.getName()):
un = unit.getSignificant(vsrandom.randrange(0, 40), 1, 0)
if (un.getName() != self.defendee.getName()):
self.defendee.performDockingOperations(un, 0)
debug.info("docking with "+un.getName())
else:
self.defendee.ActivateJumpDrive(0)
self.defendee.SetTarget(self.adjsys.SignificantUnit())
self.successdelay = VS.GetGameTime()+1
def PayMission(self):
VS.AdjustRelation(self.you.getFactionName(), self.faction, .03, 1)
self.SetVarValue(1)
if (self.cred > 0):
self.you.addCredits(self.cred)
VS.IOmessage(0, "escort mission", self.mplay,
"Excellent work pilot! Your effort has thwarted the foe!")
VS.IOmessage(0, "escort mission", self.mplay,
"You have been rewarded for your effort as agreed.")
VS.terminateMission(1)
def FailMission(self):
self.you.addCredits(-self.cred)
VS.AdjustRelation(self.you.getFactionName(), self.faction, -.02, 1)
self.SetVarValue(-1)
VS.IOmessage(0, "escort mission", self.mplay,
"You Allowed the base you were to protect to be destroyed.")
VS.IOmessage(0, "escort mission", self.mplay,
"You are a failure to your race!")
VS.IOmessage(1, "escort mission", self.mplay,
"We have contacted your bank and informed them of your failure to deliver on credit."
" They have removed a number of your credits for this inconvenience."
" Let this serve as a lesson.")
VS.terminateMission(0)
def NoEnemiesInArea(self, jp):
if (self.adjsys.DestinationSystem() != VS.getSystemFile()):
return 0
if (self.ship_check_count >= len(self.attackers)):
VS.setCompleteness(self.objective, 1.0)
return 1
un = self.attackers[self.ship_check_count]
self.ship_check_count += 1
if (un.isNull() or (un.GetHullPercent() < .7 and self.defendee.getDistance(un) > 7000)):
return 0
else:
VS.setObjective(self.objective, "Destroy the %s"%un.getName())
self.ship_check_count = 0
return 0
def GenerateEnemies(self, jp, you):
count = 0
VS.addObjective("Protect %s from %s" % (jp.getName(), self.faction))
self.objective = VS.addObjective(
"Destroy All %s Hostiles" % self.faction)
VS.setCompleteness(self.objective, 0.0)
debug.info("quantity "+str(self.quantity))
while (count < self.quantity):
L = launch.Launch()
if self.dynatkfg == "":
atkfg = "Shadow"
else:
atkfg = self.dynatkfg
L.fg = atkfg
L.dynfg = ""
if count == 0:
L.fgappend = ""
else:
L.fgappend = "_"+str(count)
if (self.dynatktype == ''):
L.type = faction_ships.getRandomFighter(self.faction)
else:
L.type = self.dynatktype
L.ai = "default"
L.num = 1
L.minradius = 20000.0
L.maxradius = 25000.0
try:
L.minradius *= faction_ships.launch_distance_factor
L.maxradius *= faction_ships.launch_distance_factor
except:
pass
L.faction = self.faction
launched = L.launch(you)
if (count == 0):
self.you.SetTarget(launched)
if (1):
launched.SetTarget(jp)
else:
launched.SetTarget(you)
if (self.dedicatedattack):
launched.setFgDirective('B')
self.attackers += [launched]
count += 1
if (self.respawn == 0 and len(self.attackers) > 0):
self.respawn = 1
import universe
universe.greet(
self.greetingText, self.attackers[0], you, self.dyndeffg)
else:
VS.IOmessage(0, "escort mission", self.mplay,
"Eliminate all %s ships here" % self.faction)
VS.IOmessage(0, "escort mission", self.mplay,
"You must protect %s." % jp.getName())
self.quantity = 0
def GenerateDefendee(self):
import escort_mission
escort_mission.escort_num += 1
L = launch.Launch()
if self.dyndeffg == "":
deffg = "Escort"
L.fgappend = "_"+str(escort_mission.escort_num)
else:
deffg = self.dyndeffg
L.fgappend = ""
L.fg = deffg
L.dynfg = ""
L.faction = self.protectivefaction
if (self.dyndeffg == '' and self.dyndeftype == ''):
L.type = faction_ships.getRandomFighter(self.protectivefaction)
else:
L.type = self.dyndeftype
L.ai = "default"
L.num = 1
L.minradius = 2.0*self.you.rSize()
L.maxradius = 3.0*self.you.rSize()
L.forcetype = True
escortee = L.launch(self.you)
escortee.upgrade("jump_drive", 0, 0, 0, 1)
escortee.setFlightgroupLeader(self.you)
escortee.setFgDirective('F')
return escortee
def Execute(self):
if (self.successdelay):
if (self.defendee.getUnitSystemFile() != self.you.getUnitSystemFile() or VS.GetGameTime()-self.successdelay > 120):
if (self.defendee):
self.PayMission()
else:
self.FailMission()
return # nothing more happens inside this control
if (self.you.isNull() or (self.launchedfriend and self.defendee.isNull())):
VS.IOmessage(0, "escort mission", self.mplay,
"#ff0000You were unable to arrive in time to help. Mission failed.")
self.SetVarValue(-1)
VS.terminateMission(0)
return
if (not self.adjsys.Execute()):
return
if (not self.arrived):
self.arrived = 1
if (self.launchedfriend == 0 and not self.incoming):
self.defendee = self.GenerateDefendee()
self.launchedfriend = 1
self.adjsys = go_somewhere_significant(
self.you, 0, self.distance_from_base, 0)
self.adjsys.Print("You must visit the %s",
"escort mission", "docked around the %s", 0)
self.jp = self.adjsys.SignificantUnit()
else:
if (self.launchedfriend == 0):
self.defendee = self.GenerateDefendee()
self.launchedfriend = 1
if (self.defendee.isNull()):
                self.FailMission()
return
else:
self.defendee.setFlightgroupLeader(self.you)
if (VS.GetGameTime()-self.gametime > 10):
self.defendee.setFgDirective('F')
if (self.quantity > 0):
self.GenerateEnemies(self.defendee, self.you)
if (self.ship_check_count == 0 and self.dedicatedattack):
if (self.targetiter >= len(self.attackers)):
self.targetiter = 0
else:
un = self.attackers[self.targetiter]
if (not un.isNull()):
un.SetTarget(self.defendee)
self.targetiter = self.targetiter+1
if (self.NoEnemiesInArea(self.defendee)):
if (self.waves > 0):
self.quantity = self.savedquantity
self.waves -= 1
else:
self.SuccessMission()
def initbriefing(self):
debug.info("init briefing")
def loopbriefing(self):
debug.info("loop briefing")
Briefing.terminate()
def endbriefing(self):
debug.info("ending briefing")
|
ermo/privateer_wcu
|
modules/missions/escort_local.py
|
Python
|
gpl-2.0
| 11,281
|
[
"VisIt"
] |
6732c3244787bb9faab0f3af84f8da04b12e8f19ff084ffbb078a0af574693a4
|
"""
Test rdkit_grid_featurizer module.
"""
import os
import unittest
import numpy as np
import pytest
from deepchem.feat.complex_featurizers import rdkit_grid_featurizer as rgf
np.random.seed(123)
def random_string(length, chars=None):
import string
if chars is None:
chars = list(string.ascii_letters + string.ascii_letters + '()[]+-.=#@/\\')
return ''.join(np.random.choice(chars, length))
class TestHelperFunctions(unittest.TestCase):
"""
Test helper functions defined in rdkit_grid_featurizer module.
"""
def setUp(self):
# TODO test more formats for ligand
current_dir = os.path.dirname(os.path.realpath(__file__))
self.protein_file = os.path.join(current_dir, 'data',
'3ws9_protein_fixer_rdkit.pdb')
self.ligand_file = os.path.join(current_dir, 'data', '3ws9_ligand.sdf')
def test_load_molecule(self):
# adding hydrogens and charges is tested in dc.utils
from rdkit.Chem.AllChem import Mol
for add_hydrogens in (True, False):
for calc_charges in (True, False):
mol_xyz, mol_rdk = rgf.load_molecule(self.ligand_file, add_hydrogens,
calc_charges)
num_atoms = mol_rdk.GetNumAtoms()
self.assertIsInstance(mol_xyz, np.ndarray)
self.assertIsInstance(mol_rdk, Mol)
self.assertEqual(mol_xyz.shape, (num_atoms, 3))
def test_generate_random_unit_vector(self):
for _ in range(100):
u = rgf.generate_random__unit_vector()
# 3D vector with unit length
self.assertEqual(u.shape, (3,))
self.assertAlmostEqual(np.linalg.norm(u), 1.0)
def test_generate_random_rotation_matrix(self):
# very basic test, we check if rotations actually work in test_rotate_molecules
for _ in range(100):
m = rgf.generate_random_rotation_matrix()
self.assertEqual(m.shape, (3, 3))
def test_rotate_molecules(self):
# check if distances do not change
vectors = np.random.rand(4, 2, 3)
norms = np.linalg.norm(vectors[:, 1] - vectors[:, 0], axis=1)
vectors_rot = np.array(rgf.rotate_molecules(vectors))
norms_rot = np.linalg.norm(vectors_rot[:, 1] - vectors_rot[:, 0], axis=1)
self.assertTrue(np.allclose(norms, norms_rot))
# check if it works for molecules with different numbers of atoms
coords = [np.random.rand(n, 3) for n in (10, 20, 40, 100)]
coords_rot = rgf.rotate_molecules(coords)
self.assertEqual(len(coords), len(coords_rot))
def test_compute_pairwise_distances(self):
n1 = 10
n2 = 50
coords1 = np.random.rand(n1, 3)
coords2 = np.random.rand(n2, 3)
distance = rgf.compute_pairwise_distances(coords1, coords2)
self.assertEqual(distance.shape, (n1, n2))
self.assertTrue((distance >= 0).all())
    # random coords between 0 and 1, so the max possible distance is sqrt(3)
    self.assertTrue((distance <= 3.0**0.5).all())
# check if correct distance metric was used
coords1 = np.array([[0, 0, 0], [1, 0, 0]])
coords2 = np.array([[1, 0, 0], [2, 0, 0], [3, 0, 0]])
distance = rgf.compute_pairwise_distances(coords1, coords2)
self.assertTrue((distance == [[1, 2, 3], [0, 1, 2]]).all())
def test_unit_vector(self):
for _ in range(10):
vector = np.random.rand(3)
norm_vector = rgf.unit_vector(vector)
self.assertAlmostEqual(np.linalg.norm(norm_vector), 1.0)
def test_angle_between(self):
for _ in range(10):
v1 = np.random.rand(3,)
v2 = np.random.rand(3,)
angle = rgf.angle_between(v1, v2)
self.assertLessEqual(angle, np.pi)
self.assertGreaterEqual(angle, 0.0)
self.assertAlmostEqual(rgf.angle_between(v1, v1), 0.0)
self.assertAlmostEqual(rgf.angle_between(v1, -v1), np.pi)
def test_hash_ecfp(self):
for power in (2, 16, 64):
for _ in range(10):
string = random_string(10)
string_hash = rgf.hash_ecfp(string, power)
self.assertIsInstance(string_hash, int)
self.assertLess(string_hash, 2**power)
self.assertGreaterEqual(string_hash, 0)
def test_hash_ecfp_pair(self):
for power in (2, 16, 64):
for _ in range(10):
string1 = random_string(10)
string2 = random_string(10)
pair_hash = rgf.hash_ecfp_pair((string1, string2), power)
self.assertIsInstance(pair_hash, int)
self.assertLess(pair_hash, 2**power)
self.assertGreaterEqual(pair_hash, 0)
def test_convert_atom_to_voxel(self):
# 20 points with coords between -5 and 5, centered at 0
coords_range = 10
xyz = (np.random.rand(20, 3) - 0.5) * coords_range
for idx in np.random.choice(20, 6):
for box_width in (10, 20, 40):
for voxel_width in (0.5, 1, 2):
voxel = rgf.convert_atom_to_voxel(xyz, idx, box_width, voxel_width)
self.assertIsInstance(voxel, list)
self.assertEqual(len(voxel), 1)
self.assertIsInstance(voxel[0], np.ndarray)
self.assertEqual(voxel[0].shape, (3,))
self.assertIs(voxel[0].dtype, np.dtype('int'))
# indices are positive
self.assertTrue((voxel[0] >= 0).all())
# coordinates were properly translated and scaled
self.assertTrue(
(voxel[0] < (box_width + coords_range) / 2.0 / voxel_width).all())
self.assertTrue(
np.allclose(voxel[0],
np.floor((xyz[idx] + box_width / 2.0) / voxel_width)))
    # for coordinates outside of the box, the function should properly
    # transform them to indices and warn the user
for args in ((np.array([[0, 1, 6]]), 0, 10, 1.0), (np.array([[0, 4, -6]]),
0, 10, 1.0)):
# TODO check if function warns. There is assertWarns method in unittest,
# but it is not implemented in 2.7 and buggy in 3.5 (issue 29620)
voxel = rgf.convert_atom_to_voxel(*args)
self.assertTrue(
np.allclose(voxel[0], np.floor((args[0] + args[2] / 2.0) / args[3])))
def test_convert_atom_pair_to_voxel(self):
# 20 points with coords between -5 and 5, centered at 0
coords_range = 10
xyz1 = (np.random.rand(20, 3) - 0.5) * coords_range
xyz2 = (np.random.rand(20, 3) - 0.5) * coords_range
# 3 pairs of indices
for idx1, idx2 in np.random.choice(20, (3, 2)):
for box_width in (10, 20, 40):
for voxel_width in (0.5, 1, 2):
v1 = rgf.convert_atom_to_voxel(xyz1, idx1, box_width, voxel_width)
v2 = rgf.convert_atom_to_voxel(xyz2, idx2, box_width, voxel_width)
v_pair = rgf.convert_atom_pair_to_voxel((xyz1, xyz2), (idx1, idx2),
box_width, voxel_width)
self.assertEqual(len(v_pair), 2)
self.assertTrue((v1 == v_pair[0]).all())
self.assertTrue((v2 == v_pair[1]).all())
def test_compute_charge_dictionary(self):
from rdkit.Chem.AllChem import ComputeGasteigerCharges
for fname in (self.ligand_file, self.protein_file):
_, mol = rgf.load_molecule(fname)
ComputeGasteigerCharges(mol)
charge_dict = rgf.compute_charge_dictionary(mol)
self.assertEqual(len(charge_dict), mol.GetNumAtoms())
for i in range(mol.GetNumAtoms()):
self.assertIn(i, charge_dict)
self.assertIsInstance(charge_dict[i], (float, int))
class TestPiInteractions(unittest.TestCase):
def setUp(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
# simple flat ring
from rdkit.Chem import MolFromSmiles
self.cycle4 = MolFromSmiles('C1CCC1')
self.cycle4.Compute2DCoords()
# load and sanitize two real molecules
_, self.prot = rgf.load_molecule(
os.path.join(current_dir, 'data', '3ws9_protein_fixer_rdkit.pdb'),
add_hydrogens=False,
calc_charges=False,
sanitize=True)
_, self.lig = rgf.load_molecule(
os.path.join(current_dir, 'data', '3ws9_ligand.sdf'),
add_hydrogens=False,
calc_charges=False,
sanitize=True)
def test_compute_ring_center(self):
# FIXME might break with different version of rdkit
self.assertTrue(
np.allclose(rgf.compute_ring_center(self.cycle4, range(4)), 0))
def test_compute_ring_normal(self):
# FIXME might break with different version of rdkit
normal = rgf.compute_ring_normal(self.cycle4, range(4))
self.assertTrue(
np.allclose(np.abs(normal / np.linalg.norm(normal)), [0, 0, 1]))
def test_is_pi_parallel(self):
ring1_center = np.array([0.0, 0.0, 0.0])
ring2_center_true = np.array([4.0, 0.0, 0.0])
ring2_center_false = np.array([10.0, 0.0, 0.0])
ring1_normal_true = np.array([1.0, 0.0, 0.0])
ring1_normal_false = np.array([0.0, 1.0, 0.0])
for ring2_normal in (np.array([2.0, 0, 0]), np.array([-3.0, 0, 0])):
# parallel normals
self.assertTrue(
rgf.is_pi_parallel(ring1_center, ring1_normal_true, ring2_center_true,
ring2_normal))
# perpendicular normals
self.assertFalse(
rgf.is_pi_parallel(ring1_center, ring1_normal_false,
ring2_center_true, ring2_normal))
# too far away
self.assertFalse(
rgf.is_pi_parallel(ring1_center, ring1_normal_true,
ring2_center_false, ring2_normal))
def test_is_pi_t(self):
ring1_center = np.array([0.0, 0.0, 0.0])
ring2_center_true = np.array([4.0, 0.0, 0.0])
ring2_center_false = np.array([10.0, 0.0, 0.0])
ring1_normal_true = np.array([0.0, 1.0, 0.0])
ring1_normal_false = np.array([1.0, 0.0, 0.0])
for ring2_normal in (np.array([2.0, 0, 0]), np.array([-3.0, 0, 0])):
# perpendicular normals
self.assertTrue(
rgf.is_pi_t(ring1_center, ring1_normal_true, ring2_center_true,
ring2_normal))
# parallel normals
self.assertFalse(
rgf.is_pi_t(ring1_center, ring1_normal_false, ring2_center_true,
ring2_normal))
# too far away
self.assertFalse(
rgf.is_pi_t(ring1_center, ring1_normal_true, ring2_center_false,
ring2_normal))
def test_compute_pi_stack(self):
# order of the molecules shouldn't matter
dicts1 = rgf.compute_pi_stack(self.prot, self.lig)
dicts2 = rgf.compute_pi_stack(self.lig, self.prot)
for i, j in ((0, 2), (1, 3)):
self.assertEqual(dicts1[i], dicts2[j])
self.assertEqual(dicts1[j], dicts2[i])
# with this criteria we should find both types of stacking
for d in rgf.compute_pi_stack(
self.lig, self.prot, dist_cutoff=7, angle_cutoff=40.):
self.assertGreater(len(d), 0)
def test_is_cation_pi(self):
cation_position = np.array([[2.0, 0.0, 0.0]])
ring_center_true = np.array([4.0, 0.0, 0.0])
ring_center_false = np.array([10.0, 0.0, 0.0])
ring_normal_true = np.array([1.0, 0.0, 0.0])
ring_normal_false = np.array([0.0, 1.0, 0.0])
# parallel normals
self.assertTrue(
rgf.is_cation_pi(cation_position, ring_center_true, ring_normal_true))
# perpendicular normals
self.assertFalse(
rgf.is_cation_pi(cation_position, ring_center_true, ring_normal_false))
# too far away
self.assertFalse(
rgf.is_cation_pi(cation_position, ring_center_false, ring_normal_true))
def test_compute_cation_pi(self):
# TODO find better example, currently dicts are empty
_ = rgf.compute_cation_pi(self.prot, self.lig)
_ = rgf.compute_cation_pi(self.lig, self.prot)
def test_compute_binding_pocket_cation_pi(self):
# TODO find better example, currently dicts are empty
prot_dict, lig_dict = rgf.compute_binding_pocket_cation_pi(
self.prot, self.lig)
exp_prot_dict, exp_lig_dict = rgf.compute_cation_pi(self.prot, self.lig)
add_lig, add_prot = rgf.compute_cation_pi(self.lig, self.prot)
for exp_dict, to_add in ((exp_prot_dict, add_prot), (exp_lig_dict,
add_lig)):
for atom_idx, count in to_add.items():
if atom_idx not in exp_dict:
exp_dict[atom_idx] = count
else:
exp_dict[atom_idx] += count
self.assertEqual(prot_dict, exp_prot_dict)
self.assertEqual(lig_dict, exp_lig_dict)
class TestFeaturizationFunctions(unittest.TestCase):
"""
Test functions calculating features defined in rdkit_grid_featurizer module.
"""
def setUp(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
self.protein_file = os.path.join(current_dir, 'data',
'3ws9_protein_fixer_rdkit.pdb')
self.ligand_file = os.path.join(current_dir, 'data', '3ws9_ligand.sdf')
def test_compute_all_ecfp(self):
_, mol = rgf.load_molecule(self.ligand_file)
num_atoms = mol.GetNumAtoms()
for degree in range(1, 4):
# TODO test if dict contains smiles
ecfp_all = rgf.compute_all_ecfp(mol, degree=degree)
self.assertIsInstance(ecfp_all, dict)
self.assertEqual(len(ecfp_all), num_atoms)
self.assertEqual(list(ecfp_all.keys()), list(range(num_atoms)))
num_ind = np.random.choice(range(1, num_atoms))
indices = list(np.random.choice(num_atoms, num_ind, replace=False))
ecfp_selected = rgf.compute_all_ecfp(mol, indices=indices, degree=degree)
self.assertIsInstance(ecfp_selected, dict)
self.assertEqual(len(ecfp_selected), num_ind)
self.assertEqual(sorted(ecfp_selected.keys()), sorted(indices))
def test_featurize_binding_pocket_ecfp(self):
prot_xyz, prot_rdk = rgf.load_molecule(self.protein_file)
lig_xyz, lig_rdk = rgf.load_molecule(self.ligand_file)
distance = rgf.compute_pairwise_distances(
protein_xyz=prot_xyz, ligand_xyz=lig_xyz)
# check if results are the same if we provide precomputed distances
prot_dict, lig_dict = rgf.featurize_binding_pocket_ecfp(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
)
prot_dict_dist, lig_dict_dist = rgf.featurize_binding_pocket_ecfp(
prot_xyz, prot_rdk, lig_xyz, lig_rdk, pairwise_distances=distance)
# ...but first check if we actually got two dicts
self.assertIsInstance(prot_dict, dict)
self.assertIsInstance(lig_dict, dict)
self.assertEqual(prot_dict, prot_dict_dist)
self.assertEqual(lig_dict, lig_dict_dist)
# check if we get less features with smaller distance cutoff
prot_dict_d2, lig_dict_d2 = rgf.featurize_binding_pocket_ecfp(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
cutoff=2.0,
)
prot_dict_d6, lig_dict_d6 = rgf.featurize_binding_pocket_ecfp(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
cutoff=6.0,
)
self.assertLess(len(prot_dict_d2), len(prot_dict))
# ligands are typically small so all atoms might be present
self.assertLessEqual(len(lig_dict_d2), len(lig_dict))
self.assertGreater(len(prot_dict_d6), len(prot_dict))
self.assertGreaterEqual(len(lig_dict_d6), len(lig_dict))
# check if using different ecfp_degree changes anything
prot_dict_e3, lig_dict_e3 = rgf.featurize_binding_pocket_ecfp(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
ecfp_degree=3,
)
self.assertNotEqual(prot_dict_e3, prot_dict)
self.assertNotEqual(lig_dict_e3, lig_dict)
def test_compute_splif_features_in_range(self):
prot_xyz, prot_rdk = rgf.load_molecule(self.protein_file)
lig_xyz, lig_rdk = rgf.load_molecule(self.ligand_file)
prot_num_atoms = prot_rdk.GetNumAtoms()
lig_num_atoms = lig_rdk.GetNumAtoms()
distance = rgf.compute_pairwise_distances(
protein_xyz=prot_xyz, ligand_xyz=lig_xyz)
for bins in ((0, 2), (2, 3)):
splif_dict = rgf.compute_splif_features_in_range(
prot_rdk,
lig_rdk,
distance,
bins,
)
self.assertIsInstance(splif_dict, dict)
for (prot_idx, lig_idx), ecfp_pair in splif_dict.items():
for idx in (prot_idx, lig_idx):
self.assertIsInstance(idx, (int, np.int64))
self.assertGreaterEqual(prot_idx, 0)
self.assertLess(prot_idx, prot_num_atoms)
self.assertGreaterEqual(lig_idx, 0)
self.assertLess(lig_idx, lig_num_atoms)
for ecfp in ecfp_pair:
ecfp_idx, ecfp_frag = ecfp.split(',')
ecfp_idx = int(ecfp_idx)
self.assertGreaterEqual(ecfp_idx, 0)
# TODO upperbound?
def test_featurize_splif(self):
prot_xyz, prot_rdk = rgf.load_molecule(self.protein_file)
lig_xyz, lig_rdk = rgf.load_molecule(self.ligand_file)
distance = rgf.compute_pairwise_distances(
protein_xyz=prot_xyz, ligand_xyz=lig_xyz)
bins = [(1, 2), (2, 3)]
dicts = rgf.featurize_splif(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
contact_bins=bins,
pairwise_distances=distance,
ecfp_degree=2)
expected_dicts = [
rgf.compute_splif_features_in_range(
prot_rdk, lig_rdk, distance, c_bin, ecfp_degree=2) for c_bin in bins
]
self.assertIsInstance(dicts, list)
self.assertEqual(dicts, expected_dicts)
@pytest.mark.linux_only
class TestRdkitGridFeaturizer(unittest.TestCase):
"""
Test RdkitGridFeaturizer class defined in rdkit_grid_featurizer module.
"""
def setUp(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
package_dir = os.path.dirname(os.path.dirname(current_dir))
self.protein_file = os.path.join(package_dir, 'dock', 'tests',
'1jld_protein.pdb')
self.ligand_file = os.path.join(package_dir, 'dock', 'tests',
'1jld_ligand.sdf')
def test_default_featurizer(self):
# test if default parameters work
featurizer = rgf.RdkitGridFeaturizer()
self.assertIsInstance(featurizer, rgf.RdkitGridFeaturizer)
feature_tensor, _ = featurizer.featurize([self.ligand_file],
[self.protein_file])
self.assertIsInstance(feature_tensor, np.ndarray)
def test_example_featurizer(self):
# check if use-case from examples works
featurizer = rgf.RdkitGridFeaturizer(
voxel_width=16.0,
feature_types=['ecfp', 'splif', 'hbond', 'salt_bridge'],
ecfp_power=9,
splif_power=9,
flatten=True)
feature_tensor, _ = featurizer.featurize([self.ligand_file],
[self.protein_file])
self.assertIsInstance(feature_tensor, np.ndarray)
def test_force_flatten(self):
# test if input is flattened when flat features are used
featurizer = rgf.RdkitGridFeaturizer(
feature_types=['ecfp_hashed'], flatten=False)
featurizer.flatten = True # False should be ignored with ecfp_hashed
feature_tensor, _ = featurizer.featurize([self.ligand_file],
[self.protein_file])
self.assertIsInstance(feature_tensor, np.ndarray)
self.assertEqual(feature_tensor.shape, (1, 2 * 2**featurizer.ecfp_power))
def test_combined(self):
ecfp_power = 5
splif_power = 5
# test voxel features
featurizer = rgf.RdkitGridFeaturizer(
voxel_width=1.0,
box_width=20.0,
feature_types=['voxel_combined'],
ecfp_power=ecfp_power,
splif_power=splif_power,
flatten=False,
sanitize=True)
feature_tensor, _ = featurizer.featurize([self.ligand_file],
[self.protein_file])
self.assertIsInstance(feature_tensor, np.ndarray)
voxel_total_len = (
2**ecfp_power +
len(featurizer.cutoffs['splif_contact_bins']) * 2**splif_power + len(
featurizer.cutoffs['hbond_dist_bins']) + 5)
self.assertEqual(feature_tensor.shape, (1, 20, 20, 20, voxel_total_len))
# test flat features
featurizer = rgf.RdkitGridFeaturizer(
voxel_width=1.0,
feature_types=['flat_combined'],
ecfp_power=ecfp_power,
splif_power=splif_power,
sanitize=True)
feature_tensor, _ = featurizer.featurize([self.ligand_file],
[self.protein_file])
self.assertIsInstance(feature_tensor, np.ndarray)
flat_total_len = (
3 * 2**ecfp_power +
len(featurizer.cutoffs['splif_contact_bins']) * 2**splif_power + len(
featurizer.cutoffs['hbond_dist_bins']))
self.assertEqual(feature_tensor.shape, (1, flat_total_len))
    # check if aromatic features are ignored when sanitize=False
featurizer = rgf.RdkitGridFeaturizer(
voxel_width=16.0,
feature_types=['all_combined'],
ecfp_power=ecfp_power,
splif_power=splif_power,
flatten=True,
sanitize=False)
self.assertTrue('pi_stack' not in featurizer.feature_types)
self.assertTrue('cation_pi' not in featurizer.feature_types)
feature_tensor, _ = featurizer.featurize([self.ligand_file],
[self.protein_file])
self.assertIsInstance(feature_tensor, np.ndarray)
total_len = voxel_total_len + flat_total_len - 3 - 2**ecfp_power
self.assertEqual(feature_tensor.shape, (1, total_len))
def test_custom_cutoffs(self):
custom_cutoffs = {
'hbond_dist_bins': [(2., 3.), (3., 3.5)],
'hbond_angle_cutoffs': [5, 90],
'splif_contact_bins': [(0, 3.5), (3.5, 6.0)],
'ecfp_cutoff': 5.0,
'sybyl_cutoff': 3.0,
'salt_bridges_cutoff': 4.0,
'pi_stack_dist_cutoff': 5.0,
'pi_stack_angle_cutoff': 15.0,
'cation_pi_dist_cutoff': 5.5,
'cation_pi_angle_cutoff': 20.0,
}
rgf_featurizer = rgf.RdkitGridFeaturizer(**custom_cutoffs)
self.assertEqual(rgf_featurizer.cutoffs, custom_cutoffs)
def test_rotations(self):
featurizer = rgf.RdkitGridFeaturizer(
nb_rotations=3,
feature_types=['voxel_combined'],
flatten=False,
sanitize=True)
feature_tensors, _ = featurizer.featurize([self.ligand_file],
[self.protein_file])
self.assertEqual(feature_tensors.shape, (1, 4, 16, 16, 16, 40))
def test_voxelize(self):
prot_xyz, prot_rdk = rgf.load_molecule(self.protein_file)
lig_xyz, lig_rdk = rgf.load_molecule(self.ligand_file)
centroid = rgf.compute_centroid(lig_xyz)
prot_xyz = rgf.subtract_centroid(prot_xyz, centroid)
lig_xyz = rgf.subtract_centroid(lig_xyz, centroid)
prot_ecfp_dict, lig_ecfp_dict = rgf.featurize_binding_pocket_ecfp(
prot_xyz, prot_rdk, lig_xyz, lig_rdk)
box_w = 20
f_power = 5
rgf_featurizer = rgf.RdkitGridFeaturizer(
box_width=box_w,
ecfp_power=f_power,
feature_types=['all_combined'],
flatten=True,
sanitize=True)
prot_tensor = rgf_featurizer._voxelize(
rgf.convert_atom_to_voxel,
rgf.hash_ecfp,
prot_xyz,
feature_dict=prot_ecfp_dict,
channel_power=f_power)
self.assertEqual(prot_tensor.shape, tuple([box_w] * 3 + [2**f_power]))
all_features = prot_tensor.sum()
# protein is too big for the box, some features should be missing
self.assertGreater(all_features, 0)
self.assertLess(all_features, prot_rdk.GetNumAtoms())
lig_tensor = rgf_featurizer._voxelize(
rgf.convert_atom_to_voxel,
rgf.hash_ecfp,
lig_xyz,
feature_dict=lig_ecfp_dict,
channel_power=f_power)
self.assertEqual(lig_tensor.shape, tuple([box_w] * 3 + [2**f_power]))
all_features = lig_tensor.sum()
# whole ligand should fit in the box
self.assertEqual(all_features, lig_rdk.GetNumAtoms())
|
lilleswing/deepchem
|
deepchem/feat/tests/test_rdkit_grid_features.py
|
Python
|
mit
| 23,753
|
[
"RDKit"
] |
62de3aa0263176131c8f9d792b51fc8afcae81a1c06ee440b6c34659c85fd7a2
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
import pyscf.lib.logger as logger
from pyscf.mcscf import mc1step
def kernel(casscf, mo_coeff, tol=1e-7, conv_tol_grad=None,
ci0=None, callback=None, verbose=None, dump_chk=True):
if verbose is None:
verbose = casscf.verbose
if callback is None:
callback = casscf.callback
log = logger.Logger(casscf.stdout, verbose)
cput0 = (logger.process_clock(), logger.perf_counter())
log.debug('Start 2-step CASSCF')
mo = mo_coeff
nmo = mo.shape[1]
ncore = casscf.ncore
ncas = casscf.ncas
nocc = ncore + ncas
eris = casscf.ao2mo(mo)
e_tot, e_cas, fcivec = casscf.casci(mo, ci0, eris, log, locals())
if ncas == nmo and not casscf.internal_rotation:
if casscf.canonicalization:
log.debug('CASSCF canonicalization')
mo, fcivec, mo_energy = casscf.canonicalize(mo, fcivec, eris,
casscf.sorting_mo_energy,
casscf.natorb, verbose=log)
else:
mo_energy = None
return True, e_tot, e_cas, fcivec, mo, mo_energy
if conv_tol_grad is None:
conv_tol_grad = numpy.sqrt(tol)
logger.info(casscf, 'Set conv_tol_grad to %g', conv_tol_grad)
conv_tol_ddm = conv_tol_grad * 3
conv = False
de, elast = e_tot, e_tot
totmicro = totinner = 0
casdm1 = 0
r0 = None
t2m = t1m = log.timer('Initializing 2-step CASSCF', *cput0)
imacro = 0
while not conv and imacro < casscf.max_cycle_macro:
imacro += 1
njk = 0
t3m = t2m
casdm1_old = casdm1
casdm1, casdm2 = casscf.fcisolver.make_rdm12(fcivec, ncas, casscf.nelecas)
norm_ddm = numpy.linalg.norm(casdm1 - casdm1_old)
t3m = log.timer('update CAS DM', *t3m)
max_cycle_micro = 1 # casscf.micro_cycle_scheduler(locals())
max_stepsize = casscf.max_stepsize_scheduler(locals())
for imicro in range(max_cycle_micro):
rota = casscf.rotate_orb_cc(mo, lambda:fcivec, lambda:casdm1, lambda:casdm2,
eris, r0, conv_tol_grad*.3, max_stepsize, log)
u, g_orb, njk1, r0 = next(rota)
rota.close()
njk += njk1
norm_t = numpy.linalg.norm(u-numpy.eye(nmo))
norm_gorb = numpy.linalg.norm(g_orb)
if imicro == 0:
norm_gorb0 = norm_gorb
de = numpy.dot(casscf.pack_uniq_var(u), g_orb)
t3m = log.timer('orbital rotation', *t3m)
eris = None
u = u.copy()
g_orb = g_orb.copy()
mo = casscf.rotate_mo(mo, u, log)
eris = casscf.ao2mo(mo)
t3m = log.timer('update eri', *t3m)
log.debug('micro %d ~dE=%5.3g |u-1|=%5.3g |g[o]|=%5.3g |dm1|=%5.3g',
imicro, de, norm_t, norm_gorb, norm_ddm)
if callable(callback):
callback(locals())
t2m = log.timer('micro iter %d'%imicro, *t2m)
if norm_t < 1e-4 or abs(de) < tol*.4 or norm_gorb < conv_tol_grad*.2:
break
totinner += njk
totmicro += imicro + 1
e_tot, e_cas, fcivec = casscf.casci(mo, fcivec, eris, log, locals())
log.timer('CASCI solver', *t3m)
t2m = t1m = log.timer('macro iter %d'%imacro, *t1m)
de, elast = e_tot - elast, e_tot
if (abs(de) < tol and
norm_gorb < conv_tol_grad and norm_ddm < conv_tol_ddm):
conv = True
else:
elast = e_tot
if dump_chk:
casscf.dump_chk(locals())
if callable(callback):
callback(locals())
if conv:
log.info('2-step CASSCF converged in %d macro (%d JK %d micro) steps',
imacro, totinner, totmicro)
else:
log.info('2-step CASSCF not converged, %d macro (%d JK %d micro) steps',
imacro, totinner, totmicro)
if casscf.canonicalization:
log.info('CASSCF canonicalization')
mo, fcivec, mo_energy = \
casscf.canonicalize(mo, fcivec, eris, casscf.sorting_mo_energy,
casscf.natorb, casdm1, log)
if casscf.natorb and dump_chk: # dump_chk may save casdm1
occ, ucas = casscf._eig(-casdm1, ncore, nocc)
casdm1 = numpy.diag(-occ)
else:
if casscf.natorb:
# FIXME (pyscf-2.0): Whether to transform natural orbitals in
# active space when this flag is enabled?
log.warn('The attribute natorb of mcscf object affects only the '
'orbital canonicalization.\n'
'If you would like to get natural orbitals in active space '
'without touching core and external orbitals, an explicit '
'call to mc.cas_natorb_() is required')
mo_energy = None
if dump_chk:
casscf.dump_chk(locals())
log.timer('2-step CASSCF', *cput0)
return conv, e_tot, e_cas, fcivec, mo, mo_energy
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 0
mol.output = None#"out_h2o"
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-0. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
]
mol.basis = {'H': 'sto-3g',
'O': '6-31g',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
emc = kernel(mc1step.CASSCF(m, 4, 4), m.mo_coeff, verbose=4)[1]
print(ehf, emc, emc-ehf)
print(emc - -3.22013929407)
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'cc-pvdz',
'O': 'cc-pvdz',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
mc = mc1step.CASSCF(m, 6, 4)
mc.verbose = 4
mo = m.mo_coeff.copy()
mo[:,2:5] = m.mo_coeff[:,[4,2,3]]
emc = mc.mc2step(mo)[0]
print(ehf, emc, emc-ehf)
#-76.0267656731 -76.0873922924 -0.0606266193028
print(emc - -76.0873923174, emc - -76.0926176464)
|
sunqm/pyscf
|
pyscf/mcscf/mc2step.py
|
Python
|
apache-2.0
| 7,056
|
[
"PySCF"
] |
8d3da47b32339a5085ab4cc513b616c42c12930d633d0a8e4def1332f128183a
|
# -*- coding: utf-8 -*-
#
# test_refractory.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
import nest
"""
Assert that all neuronal models that have a refractory period implement it
correctly (except for Hodgkin-Huxley models which cannot be tested).
Details
-------
Submit the neuron to a constant excitatory current so that it spikes in the
[0, 50] ms.
A ``spike_detector`` is used to detect the time at which the neuron spikes and
a ``voltmeter`` is then used to make sure the voltage is clamped to ``V_reset``
during exactly ``t_ref``.
For neurons that do not clamp the potential, a very large current is used to
trigger immediate spiking.
Untested models
---------------
* ``aeif_cond_alpha_RK5``
* ``ginzburg_neuron``
* ``hh_cond_exp_traub``
* ``hh_psc_alpha``
* ``hh_psc_alpha_gap``
* ``ht_neuron``
* ``iaf_chs_2007``
* ``iaf_chxk_2008``
* ``iaf_tum_2000``
* ``izhikevich``
* ``mcculloch_pitts_neuron``
* ``parrot_neuron``
* ``parrot_neuron_ps``
* ``pp_pop_psc_delta``
* ``pp_psc_delta``
* ``sli_neuron``
"""
# --------------------------------------------------------------------------- #
# Models, specific parameters
# -------------------------
#
# list of all neuronal models that can be tested by looking at clamped V
neurons_V_clamped = [
'aeif_cond_alpha',
'aeif_cond_alpha_multisynapse',
'aeif_cond_beta_multisynapse',
'aeif_cond_exp',
'aeif_psc_alpha',
'aeif_psc_exp',
'gif_cond_exp',
'gif_cond_exp_multisynapse',
'gif_psc_exp',
'gif_psc_exp_multisynapse',
'iaf_cond_alpha',
'iaf_cond_alpha_mc',
'iaf_cond_exp',
'iaf_cond_exp_sfa_rr',
'iaf_neuron',
'iaf_psc_alpha',
'iaf_psc_alpha_multisynapse',
'iaf_psc_delta',
'iaf_psc_exp',
'iaf_psc_exp_multisynapse',
]
# neurons that must be tested through a high current to spike immediately
# (t_ref = interspike)
neurons_interspike = [
"amat2_psc_exp",
"mat2_psc_exp",
"ht_neuron",
]
neurons_interspike_ps = [
"iaf_psc_alpha_canon",
"iaf_psc_alpha_presc",
"iaf_psc_delta_canon",
"iaf_psc_exp_ps",
]
# models that cannot be tested
ignore_model = [
"aeif_cond_alpha_RK5", # this one is faulty and will be removed
"ginzburg_neuron",
"hh_cond_exp_traub",
"hh_psc_alpha",
"hh_psc_alpha_gap",
"iaf_chs_2007",
"iaf_chxk_2008",
"iaf_tum_2000",
"izhikevich",
"mcculloch_pitts_neuron",
"parrot_neuron",
"parrot_neuron_ps",
"pp_pop_psc_delta",
"pp_psc_delta",
"sli_neuron",
]
tested_models = [m for m in nest.Models("nodes") if (nest.GetDefaults(
m, "element_type") == "neuron" and m not in ignore_model)]
# additional parameters for the connector
add_connect_param = {
"iaf_cond_alpha_mc": {"receptor_type": 7},
}
# --------------------------------------------------------------------------- #
# Simulation time and refractory time limits
# -------------------------
#
simtime = 100
resolution = 0.1
min_steps = 1 # minimal number of refractory steps (t_ref = resolution)
max_steps = 200 # maximal number of steps (t_ref = 200 * resolution)
# --------------------------------------------------------------------------- #
# Test class
# -------------------------
#
def foreach_neuron(func):
'''
Decorator that automatically does the test for all neurons.
'''
def wrapper(*args, **kwargs):
self = args[0]
msd = 123456
N_vp = nest.GetKernelStatus(['total_num_virtual_procs'])[0]
pyrngs = [np.random.RandomState(s) for s in range(msd, msd + N_vp)]
for name in tested_models:
nest.ResetKernel()
nest.SetKernelStatus({
'resolution': resolution, 'grng_seed': msd + N_vp,
'rng_seeds': range(msd + N_vp + 1, msd + 2 * N_vp + 1)})
func(self, name, **kwargs)
return wrapper
class RefractoryTestCase(unittest.TestCase):
"""
Check the correct implementation of refractory time in all neuronal models.
"""
def compute_reftime(self, model, sd, vm, neuron):
'''
Compute the refractory time of the neuron.
Parameters
----------
model : str
Name of the neuronal model.
sd : tuple
GID of the spike detector.
vm : tuple
GID of the voltmeter.
neuron : tuple
GID of the recorded neuron.
Returns
-------
t_ref_sim : double
Value of the simulated refractory period.
'''
spike_times = nest.GetStatus(sd, "events")[0]["times"]
if model in neurons_interspike:
            # spike emitted at the next timestep, so subtract the resolution
return spike_times[1]-spike_times[0]-resolution
elif model in neurons_interspike_ps:
return spike_times[1]-spike_times[0]
else:
Vr = nest.GetStatus(neuron, "V_reset")[0]
times = nest.GetStatus(vm, "events")[0]["times"]
# index of the 2nd spike
idx_max = np.argwhere(times == spike_times[1])[0][0]
name_Vm = "V_m.s" if model == "iaf_cond_alpha_mc" else "V_m"
Vs = nest.GetStatus(vm, "events")[0][name_Vm]
            # get the index at which the spike occurred
idx_spike = np.argwhere(times == spike_times[0])[0][0]
# find end of refractory period between 1st and 2nd spike
idx_end = np.where(
np.isclose(Vs[idx_spike:idx_max], Vr, 1e-6))[0][-1]
t_ref_sim = idx_end * resolution
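            # Worked example (ours, for clarity): with resolution = 0.1 ms,
            # if the last sample still reading V_reset lies 5 samples after
            # the spike, then idx_end = 5 and t_ref_sim = 5 * 0.1 = 0.5 ms.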
return t_ref_sim
@foreach_neuron
def test_refractory_time(self, model):
'''
Check that refractory time implementation is correct.
'''
# randomly set a refractory period
t_ref = resolution * np.random.randint(min_steps, max_steps)
# create the neuron and devices
nparams = {"t_ref": t_ref}
neuron = nest.Create(model, params=nparams)
name_Vm = "V_m.s" if model == "iaf_cond_alpha_mc" else "V_m"
vm_params = {"interval": resolution, "record_from": [name_Vm]}
vm = nest.Create("voltmeter", params=vm_params)
sd = nest.Create("spike_detector", params={'precise_times': True})
cg = nest.Create("dc_generator", params={"amplitude": 900.})
# for models that do not clamp V_m, use very large current to trigger
# almost immediate spiking => t_ref almost equals interspike
if model in neurons_interspike_ps:
nest.SetStatus(cg, "amplitude", 10000000.)
elif model in neurons_interspike:
nest.SetStatus(cg, "amplitude", 2000.)
# connect them and simulate
nest.Connect(vm, neuron)
nest.Connect(cg, neuron, syn_spec=add_connect_param.get(model, {}))
nest.Connect(neuron, sd)
nest.Simulate(simtime)
# get and compare t_ref
t_ref_sim = self.compute_reftime(model, sd, vm, neuron)
# approximate result for precise spikes (interpolation error)
if model in neurons_interspike_ps:
self.assertAlmostEqual(t_ref, t_ref_sim, places=3,
msg='''Error in model {}:
{} != {}'''.format(model, t_ref, t_ref_sim))
else:
self.assertAlmostEqual(t_ref, t_ref_sim, msg='''Error in model {}:
{} != {}'''.format(model, t_ref, t_ref_sim))
# --------------------------------------------------------------------------- #
# Run the comparisons
# ------------------------
#
def suite():
return unittest.makeSuite(RefractoryTestCase, "test")
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == '__main__':
run()
|
tillschumann/nest-simulator
|
pynest/nest/tests/test_refractory.py
|
Python
|
gpl-2.0
| 8,437
|
[
"NEURON"
] |
d0401d0a111ce83f99980db6b619e28081ee54af3cfacd8c7d3659f218583be6
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes break statements by de-sugaring into a control boolean."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import templates
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
class BreakCanonicalizationTransformer(transformer.Base):
"""Canonicalizes break statements into additional conditionals."""
def __init__(self, context):
super(BreakCanonicalizationTransformer, self).__init__(context)
# This is a stack structure, to correctly process nested loops.
# Each item is a list [break_used, break_variable_name]
self.break_uses = []
def _create_break_check(self):
template = """
(not var_name)
"""
expr, = templates.replace(template, var_name=self.break_uses[-1][1])
return expr.value
def _create_break_trigger(self):
template = """
var_name = True
"""
block = templates.replace(template, var_name=self.break_uses[-1][1])
block.append(gast.Continue())
return block
def _create_break_init(self):
template = """
var_name = False
"""
assign, = templates.replace(template, var_name=self.break_uses[-1][1])
return assign
# TODO(mdan): Surely the transformer supports this better?
def _manual_visit_list(self, block):
new_block = []
for n in block:
new_n = self.visit(n)
if isinstance(new_n, list):
new_block.extend(new_n)
else:
new_block.append(new_n)
return new_block
def visit_While(self, node):
self.generic_visit(node.test)
scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
break_var = self.context.namer.new_symbol('break_requested',
scope.referenced)
self.break_uses.append([False, break_var])
node.body = self._manual_visit_list(node.body)
if self.break_uses[-1][0]:
node.test = gast.BoolOp(gast.And(), [
node.test,
gast.UnaryOp(gast.Not(), gast.Name(break_var, gast.Load(), None))
])
final_nodes = [self._create_break_init(), node]
else:
final_nodes = node
self.break_uses.pop()
for n in node.orelse:
self.generic_visit(n)
return final_nodes
def visit_For(self, node):
self.generic_visit(node.target)
self.generic_visit(node.iter)
scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
break_var = self.context.namer.new_symbol('break_requested',
scope.referenced)
self.break_uses.append([False, break_var])
node.body = self._manual_visit_list(node.body)
if self.break_uses[-1][0]:
extra_cond = templates.replace_as_expression(
'not var_name', var_name=break_var)
anno.setanno(node, 'extra_cond', extra_cond)
final_nodes = [self._create_break_init(), node]
else:
final_nodes = node
self.break_uses.pop()
for n in node.orelse:
self.generic_visit(n)
return final_nodes
def visit_Break(self, node):
self.break_uses[-1][0] = True
return self._create_break_trigger()
def transform(node, context):
return BreakCanonicalizationTransformer(context).visit(node)
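# Illustrative sketch (ours, not part of the module): conceptually, the
# transformer rewrites
#
#   while test():
#       ...
#       if cond():
#           break
#
# into the de-sugared equivalent
#
#   break_requested = False                 # _create_break_init()
#   while test() and not break_requested:   # conjunct added by visit_While
#       ...
#       if cond():
#           break_requested = True          # _create_break_trigger()
#           continue
#
# For-loops are handled analogously, except that the extra condition is
# attached to the node as an 'extra_cond' annotation rather than folded
# into the loop test.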
|
allenlavoie/tensorflow
|
tensorflow/contrib/autograph/converters/break_statements.py
|
Python
|
apache-2.0
| 4,048
|
[
"VisIt"
] |
9bce6137149eb567b583c30f6b45581ca9392928d673526f95c67e32419cfd38
|
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
# Modified by Marcus Koehler 2017-10-12 <mok21@cam.ac.uk>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
# NOTE: We use the fluxes from the Gregorian calendar file also for the 360_day emission files
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/combined_1960-2020/0.5x0.5/combined_sources_OC_fossil_1960-2020_greg.nc'
#
# STASH code emissions are associated with
# 301-320: surface
# m01s00i312: Organic carbon fossil fuel surface emissions
#
# 321-340: full atmosphere
#
stash='m01s00i312'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='OC_fossil'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
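# Optional sanity check (ours, not part of the original workflow): an
# area-weighted regridding should approximately conserve the global
# area-weighted mean of the flux, e.g.:
#
#   from iris.analysis.cartography import area_weights
#   src_mean = nems.collapsed(['longitude', 'latitude'], iris.analysis.MEAN,
#                             weights=area_weights(nems)).data
#   dst_mean = ocube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN,
#                              weights=area_weights(ocube)).data
#   assert abs(src_mean - dst_mean) / abs(src_mean) < 1e-2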
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name='OC fossil fuel surf emissions expressed as carbon'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']=str.strip(species_name)
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_OC_fossil_1960-2020_greg.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of organic carbon from 1960 to 2020 (from selected anthropogenic fossil fuel sources only)'
ocube.attributes['File_version']='v2'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010'
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month from 1960-Jan to 2020-Dec
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day')
# mid-month points on the 360-day calendar: 15 + 30*k for k = 0..731,
# i.e. 15, 45, ..., 21945 (61 years x 12 months = 732 values)
ocube.coord(axis='t').points=numpy.arange(15, 21946, 30)
# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
# same mid-month values as the time coordinate above
frt=numpy.arange(15, 21946, 30, dtype='float64')
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# don't want the time dimension to be unlimited ('cattable'), as this is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])
# end of script
|
acsis-project/emissions
|
emissions/python/timeseries_1960-2020/regrid_OC_fossil_emissions_n96e_360d.py
|
Python
|
gpl-3.0
| 17,348
|
[
"NetCDF"
] |
985a35997ba4312150a7ddadfa5bfc19791f763bc147ec2633048da99bf437ed
|
'''
===============================================================
:mod:`gridcells.analysis.fields` - grid field related analysis
===============================================================
The :mod:`~gridcells.analysis.fields` module contains routines to analyse
spiking data either from experiments involoving a rodent running in an arena or
simulations involving an animat running in a simulated arena.
Functions
---------
.. autosummary::
gridnessScore
occupancy_prob_dist
spatialAutoCorrelation
spatialRateMap
'''
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import numpy.ma as ma
from scipy.integrate import trapz
from scipy.signal import correlate2d
from scipy.ndimage.interpolation import rotate
# Do not import when in the RTD (Read the Docs) environment
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
from . import _fields
from ..core import Pair2D
def spatialRateMap(spikeTimes, positions, arena, sigma):
'''Compute spatial rate map for spikes of a given neuron.
Preprocess neuron spike times into a smoothed spatial rate map, given arena
parameters. Both spike times and positional data must be aligned in time!
The rate map will be smoothed by a gaussian kernel.
Parameters
----------
spikeTimes : np.ndarray
Spike times for a given neuron.
positions : gridcells.core.Position2D
Positional data for these spikes. The timing must be aligned with
``spikeTimes``
arena : gridcells.core.Arena
The specification of the arena in which movement was carried out.
sigma : float
Standard deviation of the Gaussian smoothing kernel.
Returns
-------
rateMap : np.ma.MaskedArray
The 2D spatial firing rate map. The shape will be determined by the
arena type.
'''
spikeTimes = np.asarray(spikeTimes, dtype=np.double)
edges = arena.getDiscretisation()
rateMap = _fields.spatialRateMap(spikeTimes,
positions.x, positions.y, positions.dt,
edges.x, edges.y,
sigma)
# Mask values which are outside the arena
rateMap = np.ma.MaskedArray(rateMap, mask=arena.getMask(), copy=False)
return rateMap.T
def spatialAutoCorrelation(rateMap, arenaDiam, h):
'''Compute autocorrelation function of the spatial firing rate map.
This function assumes that the arena is a circle and masks all values of
the autocorrelation that are outside the `arenaDiam`.
.. warning::
This function will undergo serious interface changes in the future.
Parameters
----------
rateMap : np.ndarray
        Spatial firing rate map (2D). The shape should be `(arenaDiam/h+1,
        arenaDiam/h+1)`.
arenaDiam : float
Diameter of the arena.
h : float
Precision of the spatial firing rate map.
Returns
-------
corr : np.ndarray
The autocorrelation function, of shape `(arenadiam/h*2+1,
arenaDiam/h*2+1)`
xedges, yedges : np.ndarray
        Values of the spatial lags for the correlation function. Their
        length equals `corr.shape[0]`.
'''
    precision = int(arenaDiam / h)  # linspace expects an integer count
xedges = np.linspace(-arenaDiam, arenaDiam, precision*2 + 1)
yedges = np.linspace(-arenaDiam, arenaDiam, precision*2 + 1)
X, Y = np.meshgrid(xedges, yedges)
corr = ma.masked_array(correlate2d(rateMap, rateMap), mask = np.sqrt(X**2 + Y**2) > arenaDiam)
return corr, xedges, yedges
def gridnessScore(rateMap, arenaDiam, h, corr_cutRmin):
'''Calculate gridness score of a spatial firing rate map.
Parameters
----------
rateMap : np.ndarray
Spatial firing rate map.
arenaDiam : float
The diameter of the arena.
    h : float
        Precision of the spatial firing rate map.
    corr_cutRmin : float
        Radius of the central part of the autocorrelation that is removed
        before the rotational analysis.
Returns
-------
G : float
Gridness score.
crossCorr : np.ndarray
An array containing cross correlation values of the rotated
autocorrelations, with the original autocorrelation.
angles : np.ndarray
An array of angles corresponding to the `crossCorr` array.
Notes
-----
    This function computes the gridness score according to [1]_. The auto
    correlation of the firing rate map is rotated in 3 degree steps. The
    resulting gridness score is the difference between a minimum of cross
    correlations at 60 and 120 degrees, and a maximum of cross correlations
    at 30, 90 and 150 degrees.
The center of the auto correlation map (given by corr_cutRmin) is removed
from the map.
References
----------
.. [1] Hafting, T. et al., 2005. Microstructure of a spatial map in the
entorhinal cortex. Nature, 436(7052), pp.801-806.
'''
rateMap_mean = rateMap - np.mean(np.reshape(rateMap, (1, rateMap.size)))
    autoCorr, autoC_xedges, autoC_yedges = spatialAutoCorrelation(
        rateMap_mean, arenaDiam, h)
    # Remove the central portion of the autocorrelation (radius corr_cutRmin)
X, Y = np.meshgrid(autoC_xedges, autoC_yedges)
autoCorr[np.sqrt(X**2 + Y**2) < corr_cutRmin] = 0
da = 3
angles = list(range(0, 180+da, da))
crossCorr = []
# Rotate and compute correlation coefficient
for angle in angles:
autoCorrRot = rotate(autoCorr, angle, reshape=False)
C = np.corrcoef(np.reshape(autoCorr, (1, autoCorr.size)),
np.reshape(autoCorrRot, (1, autoCorrRot.size)))
crossCorr.append(C[0, 1])
    # integer division so the arrays can be used as indices
    max_angles_i = np.array([30, 90, 150]) // da
    min_angles_i = np.array([60, 120]) // da
maxima = np.max(np.array(crossCorr)[max_angles_i])
minima = np.min(np.array(crossCorr)[min_angles_i])
G = minima - maxima
return G, np.array(crossCorr), angles
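# Minimal usage sketch (ours; the values are hypothetical). `rate_map` would
# come from spatialRateMap() for a circular arena of diameter 180 cm sampled
# at h = 3 cm, blanking the central 30 cm of the autocorrelogram:
#
#   G, cross_corr, angles = gridnessScore(rate_map, arenaDiam=180., h=3.,
#                                         corr_cutRmin=30.)
#
# Larger G indicates stronger six-fold (grid-like) symmetry of the rate map.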
def extractSpikePositions(spikeTimes, positions):
spikeIdx = spikeTimes / positions.dt
pos_x = _fields.extractSpikePos(spikeIdx, positions.x)
pos_y = _fields.extractSpikePos(spikeIdx, positions.y)
return Pair2D(pos_x, pos_y), np.max(spikeIdx)
def occupancy_prob_dist(arena, pos):
'''Calculate a probability distribution for animal positions in an arena.
Parameters
----------
arena : :class:`~gridcells.core.arena.Arena`
Arena the animal was running in.
pos : :class:`~gridcells.core.common.Position2D`
Positions of the animal.
Returns
-------
dist : numpy.ndarray
Probability distribution for the positional data, given the
discretisation of the arena. The first dimension is the y axis, the
second dimension is the x axis. The shape of the distribution is equal
to the number of items in the discretised edges of the arena.
'''
edges = arena.getDiscretisation()
dx = arena.getDiscretisationSteps()
xedges = np.hstack((edges.x, [edges.x[-1] + dx.x]))
yedges = np.hstack((edges.y, [edges.y[-1] + dx.y]))
H, _, _ = np.histogram2d(pos.x, pos.y, bins=[xedges, yedges], normed=False)
return (H / len(pos)).T
|
lsolanka/gridcells
|
gridcells/analysis/fields.py
|
Python
|
gpl-3.0
| 7,043
|
[
"Gaussian",
"NEURON"
] |
b04fb65916d8a247e9e7370f4286fde0e7ede2f59388f1435f9232f05482b468
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import multiselectfield.db.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('branch', '0029_auto_20141203_1434'),
]
operations = [
migrations.AlterField(
model_name='demand',
name='category',
field=multiselectfield.db.fields.MultiSelectField(choices=[('1', 'Visit home'), ('2', 'Companionship'), ('3', 'Transport by car'), ('4', 'Shopping'), ('5', 'House sitting'), ('6', 'Manual jobs'), ('7', 'Gardening'), ('8', 'Pet sitting'), ('9', 'Personal care'), ('a', 'Administrative'), ('b', 'Other ...')], max_length=21, verbose_name='Type of help'),
preserve_default=True,
),
migrations.AlterField(
model_name='demand',
name='receive_help_from_who',
field=models.IntegerField(default=5, choices=[(5, 'All'), (3, 'Verified member'), (6, 'My favorite members')], verbose_name='Who can see and respond to demand/offer'),
preserve_default=True,
),
migrations.AlterField(
model_name='demand',
name='success',
field=models.NullBooleanField(default=None, verbose_name='Succeded'),
preserve_default=True,
),
migrations.AlterField(
model_name='demandproposition',
name='created',
field=models.DateTimeField(auto_now=True, verbose_name='Creation date'),
preserve_default=True,
),
migrations.AlterField(
model_name='offer',
name='category',
field=multiselectfield.db.fields.MultiSelectField(choices=[('1', 'Visit home'), ('2', 'Companionship'), ('3', 'Transport by car'), ('4', 'Shopping'), ('5', 'House sitting'), ('6', 'Manual jobs'), ('7', 'Gardening'), ('8', 'Pet sitting'), ('9', 'Personal care'), ('a', 'Administrative'), ('b', 'Other ...')], max_length=21, verbose_name='Type of help'),
preserve_default=True,
),
migrations.AlterField(
model_name='offer',
name='receive_help_from_who',
field=models.IntegerField(default=5, choices=[(5, 'All'), (3, 'Verified member'), (6, 'My favorite members')], verbose_name='Who can see and respond to demand/offer'),
preserve_default=True,
),
migrations.AlterField(
model_name='successdemand',
name='ask_to',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, related_name='success_pending', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='successdemand',
name='asked_by',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, related_name='approval_pending', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='successdemand',
name='branch',
field=models.ForeignKey(to='branch.Branch', null=True, related_name='success_branch_pending', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='successdemand',
name='comment',
field=models.TextField(null=True, blank=True, verbose_name='Comments'),
preserve_default=True,
),
migrations.AlterField(
model_name='successdemand',
name='created',
field=models.DateTimeField(auto_now=True, verbose_name='Creation date'),
preserve_default=True,
),
migrations.AlterField(
model_name='successdemand',
name='demand',
field=models.ForeignKey(to='branch.Demand', null=True, related_name='success_demand', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='successdemand',
name='time',
field=models.IntegerField(null=True, blank=True, verbose_name='Time spent (in minutes)'),
preserve_default=True,
),
]
|
MaximeBiset/care4care
|
branch/migrations/0030_auto_20141203_1712.py
|
Python
|
agpl-3.0
| 4,193
|
[
"VisIt"
] |
d2a0ec7330ca5801e8635ef03e5ee6f06e7040a5fd25a2a09ee35049ad4d2396
|
# Experiment script to compare ReLU dAs versus Gaussian-Bernoulli dAs.
# Train each with varying amounts of noise.
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from AutoEncoder import AutoEncoder
from AutoEncoder import ReluAutoEncoder
from AutoEncoder import GaussianAutoEncoder
from extract_datasets import extract_labeled_chunkrange
from load_shared import load_data_labeled
from tables import *
import os
import sys
import time
from datetime import datetime
from optparse import OptionParser
def drive_dA(learning_rate=0.00001, training_epochs=100,
batch_size=32):
"""
This dA is driven with foci data
:type learning_rate: float
    :param learning_rate: learning rate used for training the denoising
AutoEncoder
:type training_epochs: int
:param training_epochs: number of epochs used for training
:type batch_size: int
:param batch_size: size of each minibatch
"""
parser = OptionParser()
parser.add_option("-d", "--dir", dest="dir", help="test output directory")
parser.add_option("-c", "--corruption", dest="corruption", help="use this amount of corruption for the denoising AE", type="float")
parser.add_option("-i", "--inputfile", dest="inputfile", help="the hdf5 filename as an absolute pathname")
(options, args) = parser.parse_args()
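    # Example invocation (ours; the paths are hypothetical):
    #   python ReLU_vs_GB_test_script.py -d /tmp/out -c 0.3 -i /data/foci.h5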
#current_dir = os.getcwd()
#os.chdir(options.dir)
today = datetime.today()
day = str(today.date())
hour = str(today.time())
corruptn = str(options.corruption)
#output_filename = "gb_da." + "corruption_" + corruptn + "_" + day + "." + hour
#output_file = open(output_filename,'w')
#print >> output_file, "Run on " + str(datetime.now())
#os.chdir(current_dir)
data_set_file = openFile(str(options.inputfile), mode = 'r')
datafiles, labels = extract_labeled_chunkrange(data_set_file, num_files = 10)
datasets = load_data_labeled(datafiles, labels)
train_set_x, train_set_y = datasets[0]
data_set_file.close()
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_cols = train_set_x.get_value(borrow=True).shape[1]
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data matrix
##################################
# Build the GaussianBernoulli dA #
##################################
#rng = numpy.random.RandomState(2345)
#theano_rng = RandomStreams(rng.randint(2 ** 30))
#da = GaussianAutoEncoder(numpy_rng=rng, theano_rng=theano_rng, input=x,
#n_visible=n_cols, n_hidden=800)
#cost, updates = da.get_cost_updates(corruption_level=options.corruption,
#learning_rate=learning_rate)
#train_da = theano.function([index], cost, updates=updates,
#givens={x: train_set_x[index * batch_size:
#(index + 1) * batch_size]})
#start_time = time.clock()
#############
## TRAINING #
#############
## go through training epochs
#for epoch in xrange(training_epochs):
## go through training set
#c = []
#for batch_index in xrange(n_train_batches):
#c.append(train_da(batch_index))
#print >> output_file, 'Training epoch %d, cost ' % epoch, numpy.mean(c)
#end_time = time.clock()
#training_time = (end_time - start_time)
#print >> output_file, ('The ' + str(options.corruption) + ' corruption code for file ' +
#os.path.split(__file__)[1] +
#' ran for %.2fm' % ((training_time) / 60.))
#output_file.close()
##########
# Build the ReLU dA
##########
output_filename = "relu_da." + "corruption_" + corruptn + "_" + day + "." + hour
current_dir = os.getcwd()
os.chdir(options.dir)
output_file = open(output_filename,'w')
os.chdir(current_dir)
print >> output_file, "Run on " + str(datetime.now())
rng = numpy.random.RandomState(6789)
theano_rng = RandomStreams(rng.randint(2 ** 30))
da = ReluAutoEncoder(numpy_rng=rng, theano_rng=theano_rng, input=x,
n_visible=n_cols, n_hidden=800)
cost, updates = da.get_cost_updates_safe(corruption_level=float(options.corruption),
learning_rate=learning_rate,mb_size=batch_size)
train_da = theano.function([index], cost, updates=updates,
givens={x: train_set_x[index * batch_size:
(index + 1) * batch_size]})
start_time = time.clock()
##########
# Train the model
##########
# go through training epochs
for epoch in xrange(training_epochs):
        # go through the training set
c = []
for batch_index in xrange(n_train_batches):
c.append(train_da(batch_index))
print >> output_file, 'Training epoch %d, cost ' % epoch, numpy.mean(c)
end_time = time.clock()
training_time = (end_time - start_time)
print >> output_file, ('The ' + str(options.corruption) + ' corruption code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((training_time) / 60.))
output_file.close()
if __name__ == '__main__':
drive_dA()
|
lzamparo/SdA_reduce
|
theano_models/dA/ReLU_vs_GB_test_script.py
|
Python
|
bsd-3-clause
| 5,659
|
[
"Gaussian"
] |
1fa9d9ed63b1292f4762fdb3b9ce6539f3b53d38c1e4b1c23a398f81f50ae86b
|
#!/galaxy/home/mgehrin/hiclib/bin/python
"""
Filter maf blocks for presence of wildcard columns. Blocks must meet the
criteria of having at least `min_good` columns, each of which has more than
`min_species` rows that are NOT wildcard bases ('*').
TODO: Allow specifying the character of the wildcard base.
usage: %prog min_good min_species < maf > maf
"""
from __future__ import division
import psyco_full
import sys
from bx.align import maf
from optparse import OptionParser
def main():
min_good = int( sys.argv[1] )
min_species = int( sys.argv[2] )
maf_reader = maf.Reader( sys.stdin )
maf_writer = maf.Writer( sys.stdout )
for m in maf_reader:
good = 0
for col in m.column_iter():
if col.count( '*' ) <= min_species:
good += 1
if good >= min_good:
maf_writer.write( m )
if __name__ == "__main__":
main()
|
bxlab/HiFive_Paper
|
Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/maf_filter_max_wc.py
|
Python
|
bsd-3-clause
| 925
|
[
"Galaxy"
] |
7efb71af9b75b46d789a17eb7676b9e61f66d166ce581b71e3b5126b4cbcf020
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for spectral_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import spectral_ops
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.platform import test
class SpectralOpsTest(test.TestCase):
@staticmethod
def _np_hann_periodic_window(length):
if length == 1:
return np.ones(1)
odd = length % 2
if not odd:
length += 1
window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))
if not odd:
window = window[:-1]
return window
@staticmethod
def _np_frame(data, window_length, hop_length):
num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))
shape = (num_frames, window_length)
strides = (data.strides[0] * hop_length, data.strides[0])
return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
@staticmethod
def _np_stft(data, fft_length, hop_length, window_length):
frames = SpectralOpsTest._np_frame(data, window_length, hop_length)
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return np.fft.rfft(frames * window, fft_length)
@staticmethod
def _np_inverse_stft(stft, fft_length, hop_length, window_length):
frames = np.fft.irfft(stft, fft_length)
    # Pad or truncate frames' inner dimension to window_length.
frames = frames[..., :window_length]
frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +
[[0, max(0, window_length - frames.shape[-1])]], "constant")
window = SpectralOpsTest._np_hann_periodic_window(window_length)
return SpectralOpsTest._np_overlap_add(frames * window, hop_length)
@staticmethod
def _np_overlap_add(stft, hop_length):
num_frames, window_length = np.shape(stft)
# Output length will be one complete window, plus another hop_length's
# worth of points for each additional window.
output_length = window_length + (num_frames - 1) * hop_length
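    # Worked example (ours): 3 frames of length 8 with hop_length 4
    # overlap-add into 8 + (3 - 1) * 4 = 16 output samples.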
output = np.zeros(output_length)
for i in range(num_frames):
output[i * hop_length:i * hop_length + window_length] += stft[i,]
return output
def _compare(self, signal, frame_length, frame_step, fft_length):
with spectral_ops_test_util.fft_kernel_label_map(), (
self.cached_session(use_gpu=True)) as sess:
actual_stft = spectral_ops.stft(
signal, frame_length, frame_step, fft_length, pad_end=False)
signal_ph = array_ops.placeholder(dtype=dtypes.as_dtype(signal.dtype))
actual_stft_from_ph = spectral_ops.stft(
signal_ph, frame_length, frame_step, fft_length, pad_end=False)
actual_inverse_stft = spectral_ops.inverse_stft(
actual_stft, frame_length, frame_step, fft_length)
actual_stft, actual_stft_from_ph, actual_inverse_stft = sess.run(
[actual_stft, actual_stft_from_ph, actual_inverse_stft],
feed_dict={signal_ph: signal})
actual_stft_ph = array_ops.placeholder(dtype=actual_stft.dtype)
actual_inverse_stft_from_ph = sess.run(
spectral_ops.inverse_stft(
actual_stft_ph, frame_length, frame_step, fft_length),
feed_dict={actual_stft_ph: actual_stft})
# Confirm that there is no difference in output when shape/rank is fully
# unknown or known.
self.assertAllClose(actual_stft, actual_stft_from_ph)
self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)
expected_stft = SpectralOpsTest._np_stft(
signal, fft_length, frame_step, frame_length)
self.assertAllClose(expected_stft, actual_stft, 1e-4, 1e-4)
expected_inverse_stft = SpectralOpsTest._np_inverse_stft(
expected_stft, fft_length, frame_step, frame_length)
self.assertAllClose(
expected_inverse_stft, actual_inverse_stft, 1e-4, 1e-4)
@test_util.disable_xla("This test never passed for XLA")
def test_shapes(self):
with spectral_ops_test_util.fft_kernel_label_map(), (
self.session(use_gpu=True)):
signal = np.zeros((512,)).astype(np.float32)
# If fft_length is not provided, the smallest enclosing power of 2 of
# frame_length (8) is used.
stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
fft_length=16, pad_end=True)
self.assertAllEqual([64, 9], stft.shape.as_list())
self.assertAllEqual([64, 9], self.evaluate(stft).shape)
stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,
fft_length=8, pad_end=True)
self.assertAllEqual([64, 5], stft.shape.as_list())
self.assertAllEqual([64, 5], self.evaluate(stft).shape)
stft = np.zeros((32, 9)).astype(np.complex64)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,
fft_length=16, frame_step=8)
expected_length = (stft.shape[0] - 1) * 8 + 8
self.assertAllEqual([256], inverse_stft.shape.as_list())
self.assertAllEqual([expected_length], self.evaluate(inverse_stft).shape)
@test_util.disable_xla("This test never passed for XLA")
def test_stft_and_inverse_stft(self):
"""Test that spectral_ops.stft/inverse_stft match a NumPy implementation."""
# Tuples of (signal_length, frame_length, frame_step, fft_length).
test_configs = [
(512, 64, 32, 64),
(512, 64, 64, 64),
(512, 72, 64, 64),
(512, 64, 25, 64),
(512, 25, 15, 36),
(123, 23, 5, 42),
]
for signal_length, frame_length, frame_step, fft_length in test_configs:
signal = np.random.random(signal_length).astype(np.float32)
self._compare(signal, frame_length, frame_step, fft_length)
def test_stft_round_trip(self):
# Tuples of (signal_length, frame_length, frame_step, fft_length,
# threshold, corrected_threshold).
test_configs = [
# 87.5% overlap.
(4096, 256, 32, 256, 1e-5, 1e-6),
# 75% overlap.
(4096, 256, 64, 256, 1e-5, 1e-6),
# Odd frame hop.
(4096, 128, 25, 128, 1e-3, 1e-6),
# Odd frame length.
(4096, 127, 32, 128, 1e-3, 1e-6),
# 50% overlap.
(4096, 128, 64, 128, 0.40, 1e-6),
]
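    # (Added note.) The overlap percentages quoted above follow
    # 1 - frame_step / frame_length, e.g. 1 - 32 / 256 = 87.5% and
    # 1 - 64 / 128 = 50%.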
for (signal_length, frame_length, frame_step, fft_length, threshold,
corrected_threshold) in test_configs:
# Generate a random white Gaussian signal.
signal = random_ops.random_normal([signal_length])
with spectral_ops_test_util.fft_kernel_label_map(), (
self.cached_session(use_gpu=True)) as sess:
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
pad_end=False)
inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
fft_length)
inverse_stft_corrected = spectral_ops.inverse_stft(
stft, frame_length, frame_step, fft_length,
window_fn=spectral_ops.inverse_stft_window_fn(frame_step))
signal, inverse_stft, inverse_stft_corrected = sess.run(
[signal, inverse_stft, inverse_stft_corrected])
# Truncate signal to the size of inverse stft.
signal = signal[:inverse_stft.shape[0]]
# Ignore the frame_length samples at either edge.
signal = signal[frame_length:-frame_length]
inverse_stft = inverse_stft[frame_length:-frame_length]
inverse_stft_corrected = inverse_stft_corrected[
frame_length:-frame_length]
# Check that the inverse and original signal are close up to a scale
# factor.
inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))
signal_scaled = signal / np.mean(np.abs(signal))
self.assertLess(np.std(inverse_stft_scaled - signal_scaled), threshold)
# Check that the inverse with correction and original signal are close.
self.assertLess(np.std(inverse_stft_corrected - signal),
corrected_threshold)
def test_inverse_stft_window_fn(self):
"""Test that inverse_stft_window_fn has unit gain at each window phase."""
# Tuples of (frame_length, frame_step).
test_configs = [
(256, 32),
(256, 64),
(128, 25),
(127, 32),
(128, 64),
]
for (frame_length, frame_step) in test_configs:
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
with self.cached_session(use_gpu=True) as sess:
hann_window, inverse_window = self.evaluate(
[hann_window, inverse_window])
# Expect unit gain at each phase of the window.
product_window = hann_window * inverse_window
for i in range(frame_step):
self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))
def test_inverse_stft_window_fn_special_case(self):
"""Test inverse_stft_window_fn in special overlap = 3/4 case."""
# Cases in which frame_length is an integer multiple of 4 * frame_step are
# special because they allow exact reproduction of the waveform with a
# squared Hann window (Hann window in both forward and reverse transforms).
# In the case where frame_length = 4 * frame_step, that combination
# produces a constant gain of 1.5, and so the corrected window will be the
# Hann window / 1.5.
# Tuples of (frame_length, frame_step).
test_configs = [
(256, 64),
(128, 32),
]
for (frame_length, frame_step) in test_configs:
hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
with self.cached_session(use_gpu=True) as sess:
hann_window, inverse_window = self.evaluate(
[hann_window, inverse_window])
self.assertAllClose(hann_window, inverse_window * 1.5)
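  # (Added illustrative sketch, not part of the original test.) The constant
  # 1.5 gain can be checked directly in NumPy for, say, frame_length=256 and
  # frame_step=64, using the periodic Hann window that window_ops.hann_window
  # produces by default:
  #
  #   n = np.arange(256)
  #   w = 0.5 - 0.5 * np.cos(2 * np.pi * n / 256)
  #   gains = [np.sum((w ** 2)[k::64]) for k in range(64)]
  #   # every entry of gains equals 1.5, hence inverse_window == hann / 1.5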
@staticmethod
def _compute_stft_gradient(signal, frame_length=32, frame_step=16,
fft_length=32):
"""Computes the gradient of the STFT with respect to `signal`."""
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)
magnitude_stft = math_ops.abs(stft)
loss = math_ops.reduce_sum(magnitude_stft)
return gradients_impl.gradients([loss], [signal])[0]
def test_gradients(self):
"""Test that spectral_ops.stft has a working gradient."""
with spectral_ops_test_util.fft_kernel_label_map(), (
self.session(use_gpu=True)) as sess:
signal_length = 512
# An all-zero signal has all zero gradients with respect to the sum of the
# magnitude STFT.
empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
empty_signal_gradient = sess.run(
self._compute_stft_gradient(empty_signal))
self.assertTrue((empty_signal_gradient == 0.0).all())
# A sinusoid will have non-zero components of its gradient with respect to
# the sum of the magnitude STFT.
sinusoid = math_ops.sin(
2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid))
self.assertFalse((sinusoid_gradient == 0.0).all())
def test_gradients_numerical(self):
with spectral_ops_test_util.fft_kernel_label_map(), (
self.session(use_gpu=True)):
      # Tuples of (signal_length, frame_length, frame_step, fft_length).
# TODO(rjryan): Investigate why STFT gradient error is so high.
test_configs = [
(64, 16, 8, 16),
(64, 16, 16, 16),
(64, 16, 7, 16),
(64, 7, 4, 9),
(29, 5, 1, 10),
]
for (signal_length, frame_length, frame_step, fft_length) in test_configs:
signal_shape = [signal_length]
signal = random_ops.random_uniform(signal_shape)
stft_shape = [max(0, 1 + (signal_length - frame_length) // frame_step),
fft_length // 2 + 1]
stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
pad_end=False)
inverse_stft_shape = [(stft_shape[0] - 1) * frame_step + frame_length]
inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
fft_length)
stft_error = test.compute_gradient_error(signal, [signal_length],
stft, stft_shape)
inverse_stft_error = test.compute_gradient_error(
stft, stft_shape, inverse_stft, inverse_stft_shape)
self.assertLess(stft_error, 2e-3)
self.assertLess(inverse_stft_error, 5e-4)
if __name__ == "__main__":
test.main()
|
apark263/tensorflow
|
tensorflow/python/kernel_tests/signal/spectral_ops_test.py
|
Python
|
apache-2.0
| 14,519
|
[
"Gaussian"
] |
517c866a289a066e102eedc08f37b9fc8b6daf3c3ce7650c1b3383fa1c5d5af9
|
from django.test import TestCase, tag
from edc_appointment.models import Appointment
from edc_base import get_utcnow
from edc_reference import site_reference_configs
from edc_visit_schedule.site_visit_schedules import site_visit_schedules
from edc_visit_tracking.constants import SCHEDULED
from ..metadata_wrappers import RequisitionMetadataWrapper, CrfMetadataWrapper
from ..metadata_wrappers import MetadataWrapperError
from ..metadata_wrappers import RequisitionMetadataWrappers, CrfMetadataWrappers
from ..models import CrfMetadata, RequisitionMetadata
from .models import SubjectConsent, SubjectVisit, CrfOne, SubjectRequisition
from .reference_configs import register_to_site_reference_configs
from .visit_schedule import visit_schedule
from edc_facility.import_holidays import import_holidays
from edc_lab.models.panel import Panel
class TestMetadataWrapperObjects(TestCase):
def setUp(self):
import_holidays()
self.panel_one = Panel.objects.create(name='one')
register_to_site_reference_configs()
site_visit_schedules._registry = {}
site_visit_schedules.loaded = False
site_visit_schedules.register(visit_schedule)
site_reference_configs.register_from_visit_schedule(
visit_models={
'edc_appointment.appointment': 'edc_metadata.subjectvisit'})
self.subject_identifier = '1111111'
subject_consent = SubjectConsent.objects.create(
subject_identifier=self.subject_identifier,
consent_datetime=get_utcnow())
_, self.schedule = site_visit_schedules.get_by_onschedule_model(
'edc_metadata.onschedule')
self.schedule.put_on_schedule(
subject_identifier=self.subject_identifier,
onschedule_datetime=subject_consent.consent_datetime)
self.appointment = Appointment.objects.get(
subject_identifier=self.subject_identifier,
visit_code=self.schedule.visits.first.code)
self.subject_visit = SubjectVisit.objects.create(
appointment=self.appointment,
subject_identifier=self.subject_identifier,
reason=SCHEDULED)
def test_crf_metadata_wrapper_none(self):
metadata_obj = CrfMetadata.objects.get(
subject_identifier=self.subject_identifier,
model='edc_metadata.crfone')
crf_metadata_wrapper = CrfMetadataWrapper(
visit=self.subject_visit,
metadata_obj=metadata_obj)
self.assertEqual(crf_metadata_wrapper.model_cls, CrfOne)
self.assertEqual(crf_metadata_wrapper.model_obj, None)
self.assertEqual(crf_metadata_wrapper.metadata_obj, metadata_obj)
self.assertEqual(crf_metadata_wrapper.visit, self.subject_visit)
def test_crf_metadata_wrapper_exists(self):
model_obj = CrfOne.objects.create(
subject_visit=self.subject_visit)
metadata_obj = CrfMetadata.objects.get(
subject_identifier=self.subject_identifier,
model='edc_metadata.crfone')
crf_metadata_wrapper = CrfMetadataWrapper(
visit=self.subject_visit,
metadata_obj=metadata_obj)
self.assertEqual(crf_metadata_wrapper.model_cls, CrfOne)
self.assertEqual(crf_metadata_wrapper.model_obj, model_obj)
self.assertEqual(crf_metadata_wrapper.metadata_obj, metadata_obj)
self.assertEqual(crf_metadata_wrapper.visit, self.subject_visit)
def test_requisition_metadata_wrapper_none(self):
metadata_obj = RequisitionMetadata.objects.get(
subject_identifier=self.subject_identifier,
model='edc_metadata.subjectrequisition',
panel_name=self.panel_one.name)
requisition_metadata_wrapper = RequisitionMetadataWrapper(
visit=self.subject_visit,
metadata_obj=metadata_obj)
self.assertEqual(
requisition_metadata_wrapper.model_cls, SubjectRequisition)
self.assertEqual(requisition_metadata_wrapper.model_obj, None)
self.assertEqual(
requisition_metadata_wrapper.metadata_obj, metadata_obj)
self.assertEqual(requisition_metadata_wrapper.visit,
self.subject_visit)
def test_requisition_metadata_wrapper_exists(self):
model_obj = SubjectRequisition.objects.create(
subject_visit=self.subject_visit,
panel=self.panel_one)
metadata_obj = RequisitionMetadata.objects.get(
subject_identifier=self.subject_identifier,
model='edc_metadata.subjectrequisition',
panel_name=self.panel_one.name)
requisition_metadata_wrapper = RequisitionMetadataWrapper(
visit=self.subject_visit,
metadata_obj=metadata_obj)
self.assertEqual(
requisition_metadata_wrapper.model_cls, SubjectRequisition)
self.assertEqual(requisition_metadata_wrapper.model_obj, model_obj)
self.assertEqual(
requisition_metadata_wrapper.metadata_obj, metadata_obj)
self.assertEqual(requisition_metadata_wrapper.visit,
self.subject_visit)
def test_crf_metadata_wrapper_raises_on_invalid_model(self):
metadata_obj = CrfMetadata.objects.create(
subject_identifier=self.subject_identifier,
model='edc_metadata.blah',
show_order=9999)
self.assertRaises(
MetadataWrapperError,
CrfMetadataWrapper,
visit=self.subject_visit,
metadata_obj=metadata_obj)
def test_crf_metadata_wrapper_raises_on_missing_crf_model_manager(self):
metadata_obj = CrfMetadata.objects.create(
subject_identifier=self.subject_identifier,
model='edc_metadata.crfmissingmanager',
show_order=9999)
self.assertRaises(
MetadataWrapperError,
CrfMetadataWrapper,
visit=self.subject_visit,
metadata_obj=metadata_obj)
def test_get_crfs(self):
crf_metadata_wrappers = CrfMetadataWrappers(
appointment=self.appointment)
self.assertEqual(len(crf_metadata_wrappers.objects), 5)
def test_get_requisitions(self):
requisition_metadata_wrappers = RequisitionMetadataWrappers(
appointment=self.appointment)
self.assertEqual(len(requisition_metadata_wrappers.objects), 6)
|
botswana-harvard/edc-meta-data
|
edc_metadata/tests/test_metadata_wrappers.py
|
Python
|
gpl-2.0
| 6,408
|
[
"VisIt"
] |
1e04e9d41922383d3ff6d612c514aa01b78f9a439321792723081b1128953c70
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
from unittest import TestCase
from exatomic.molcas.editor import Editor
class TestEditor(TestCase):
"""Tests that metadata is set appropriately for Molcas editors."""
def test_no_meta(self):
"""Test that program metadata is set by default."""
fl = Editor('', ignore=True)
        self.assertEqual(fl.meta['program'], 'molcas')
def test_with_meta(self):
"""Test that passed metadata is respected and program is set."""
fl = Editor('', meta={'meta': 'data'}, ignore=True)
self.assertEqual(fl.meta['meta'], 'data')
self.assertEqual(fl.meta['program'], 'molcas')
|
exa-analytics/atomic
|
exatomic/molcas/tests/test_editor.py
|
Python
|
apache-2.0
| 762
|
[
"MOLCAS"
] |
39c38b46599133fd0488db24c16e19506008da2505657e303dd5f34d83eb87a7
|
# coding=utf-8
"""
Tests for the course home page.
"""
from datetime import datetime, timedelta
import ddt
import mock
from django.conf import settings
from django.urls import reverse
from django.http import QueryDict
from django.utils.http import urlquote_plus
from django.utils.timezone import now
from pytz import UTC
from waffle.models import Flag
from waffle.testutils import override_flag
from django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_COMMUNITY_TA
)
from django_comment_client.tests.factories import RoleFactory
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from courseware.tests.helpers import get_expiration_banner_text
from experiments.models import ExperimentKeyValue
from lms.djangoapps.commerce.models import CommerceConfiguration
from lms.djangoapps.commerce.utils import EcommerceService
from lms.djangoapps.course_goals.api import add_course_goal, remove_course_goal
from lms.djangoapps.courseware.tests.factories import (
InstructorFactory,
StaffFactory,
BetaTesterFactory,
OrgStaffFactory,
OrgInstructorFactory,
GlobalStaffFactory,
)
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.schedules.tests.factories import ScheduleFactory
from openedx.core.djangoapps.waffle_utils.testutils import WAFFLE_TABLES, override_waffle_flag
from openedx.features.course_duration_limits.config import EXPERIMENT_ID
from openedx.features.course_duration_limits.models import CourseDurationLimitConfig
from openedx.features.course_experience import (
SHOW_REVIEWS_TOOL_FLAG,
SHOW_UPGRADE_MSG_ON_COURSE_HOME,
UNIFIED_COURSE_TAB_FLAG,
COURSE_ENABLE_UNENROLLED_ACCESS_FLAG,
)
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from util.date_utils import strftime_localized
from xmodule.course_module import COURSE_VISIBILITY_PRIVATE, COURSE_VISIBILITY_PUBLIC_OUTLINE, COURSE_VISIBILITY_PUBLIC
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import CourseUserType, ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
from ... import COURSE_PRE_START_ACCESS_FLAG, ENABLE_COURSE_GOALS
from .helpers import add_course_mode
from .test_course_updates import create_course_update, remove_course_updates
TEST_PASSWORD = 'test'
TEST_CHAPTER_NAME = 'Test Chapter'
TEST_COURSE_TOOLS = 'Course Tools'
TEST_COURSE_TODAY = 'Today is'
TEST_BANNER_CLASS = '<div class="course-expiration-message">'
TEST_WELCOME_MESSAGE = '<h2>Welcome!</h2>'
TEST_UPDATE_MESSAGE = '<h2>Test Update!</h2>'
TEST_COURSE_UPDATES_TOOL = '/course/updates">'
TEST_COURSE_HOME_MESSAGE = 'course-message'
TEST_COURSE_HOME_MESSAGE_ANONYMOUS = '/login'
TEST_COURSE_HOME_MESSAGE_UNENROLLED = 'Enroll now'
TEST_COURSE_HOME_MESSAGE_PRE_START = 'Course starts in'
TEST_COURSE_GOAL_OPTIONS = 'goal-options-container'
TEST_COURSE_GOAL_UPDATE_FIELD = 'section-goals'
TEST_COURSE_GOAL_UPDATE_FIELD_HIDDEN = 'section-goals hidden'
COURSE_GOAL_DISMISS_OPTION = 'unsure'
THREE_YEARS_AGO = now() - timedelta(days=(365 * 3))
QUERY_COUNT_TABLE_BLACKLIST = WAFFLE_TABLES
def course_home_url(course):
"""
Returns the URL for the course's home page.
Arguments:
course (CourseDescriptor): The course being tested.
"""
return course_home_url_from_string(unicode(course.id))
def course_home_url_from_string(course_key_string):
"""
Returns the URL for the course's home page.
Arguments:
course_key_string (String): The course key as string.
"""
return reverse(
'openedx.course_experience.course_home',
kwargs={
'course_id': course_key_string,
}
)
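# Example (illustrative, not part of the original module): for a course run key
# such as 'course-v1:edX+DemoX+Demo_Course', course_home_url_from_string()
# reverses the 'openedx.course_experience.course_home' URL pattern for that
# course id, and course_home_url() does the same after stringifying the
# course's id.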
class CourseHomePageTestCase(SharedModuleStoreTestCase):
"""
Base class for testing the course home page.
"""
@classmethod
def setUpClass(cls):
"""
Set up a course to be used for testing.
"""
# pylint: disable=super-method-not-called
with cls.setUpClassAndTestData():
with cls.store.default_store(ModuleStoreEnum.Type.split):
cls.course = CourseFactory.create(
org='edX',
number='test',
display_name='Test Course',
start=now() - timedelta(days=30),
)
with cls.store.bulk_operations(cls.course.id):
chapter = ItemFactory.create(
category='chapter',
parent_location=cls.course.location,
display_name=TEST_CHAPTER_NAME,
)
section = ItemFactory.create(category='sequential', parent_location=chapter.location)
section2 = ItemFactory.create(category='sequential', parent_location=chapter.location)
ItemFactory.create(category='vertical', parent_location=section.location)
ItemFactory.create(category='vertical', parent_location=section2.location)
@classmethod
def setUpTestData(cls):
"""Set up and enroll our fake user in the course."""
super(CourseHomePageTestCase, cls).setUpTestData()
cls.staff_user = StaffFactory(course_key=cls.course.id, password=TEST_PASSWORD)
cls.user = UserFactory(password=TEST_PASSWORD)
CourseEnrollment.enroll(cls.user, cls.course.id)
def create_future_course(self, specific_date=None):
"""
Creates and returns a course in the future.
"""
return CourseFactory.create(
display_name='Test Future Course',
start=specific_date if specific_date else now() + timedelta(days=30),
)
class TestCourseHomePage(CourseHomePageTestCase):
def setUp(self):
super(TestCourseHomePage, self).setUp()
self.client.login(username=self.user.username, password=TEST_PASSWORD)
def tearDown(self):
remove_course_updates(self.user, self.course)
super(TestCourseHomePage, self).tearDown()
def test_welcome_message_when_unified(self):
# Create a welcome message
create_course_update(self.course, self.user, TEST_WELCOME_MESSAGE)
url = course_home_url(self.course)
response = self.client.get(url)
self.assertContains(response, TEST_WELCOME_MESSAGE, status_code=200)
@override_waffle_flag(UNIFIED_COURSE_TAB_FLAG, active=False)
def test_welcome_message_when_not_unified(self):
# Create a welcome message
create_course_update(self.course, self.user, TEST_WELCOME_MESSAGE)
url = course_home_url(self.course)
response = self.client.get(url)
self.assertNotContains(response, TEST_WELCOME_MESSAGE, status_code=200)
def test_updates_tool_visibility(self):
"""
Verify that the updates course tool is visible only when the course
has one or more updates.
"""
url = course_home_url(self.course)
response = self.client.get(url)
self.assertNotContains(response, TEST_COURSE_UPDATES_TOOL, status_code=200)
create_course_update(self.course, self.user, TEST_UPDATE_MESSAGE)
url = course_home_url(self.course)
response = self.client.get(url)
self.assertContains(response, TEST_COURSE_UPDATES_TOOL, status_code=200)
def test_queries(self):
"""
Verify that the view's query count doesn't regress.
"""
CourseDurationLimitConfig.objects.create(enabled=True, enabled_as_of=datetime(2018, 1, 1))
# Pre-fetch the view to populate any caches
course_home_url(self.course)
# Fetch the view and verify the query counts
# TODO: decrease query count as part of REVO-28
with self.assertNumQueries(87, table_blacklist=QUERY_COUNT_TABLE_BLACKLIST):
with check_mongo_calls(4):
url = course_home_url(self.course)
self.client.get(url)
@mock.patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_start_date_handling(self):
"""
Verify that the course home page handles start dates correctly.
"""
# The course home page should 404 for a course starting in the future
future_course = self.create_future_course(datetime(2030, 1, 1, tzinfo=UTC))
url = course_home_url(future_course)
response = self.client.get(url)
self.assertRedirects(response, '/dashboard?notlive=Jan+01%2C+2030')
# With the Waffle flag enabled, the course should be visible
with override_flag(COURSE_PRE_START_ACCESS_FLAG.namespaced_flag_name, True):
url = course_home_url(future_course)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@ddt.ddt
class TestCourseHomePageAccess(CourseHomePageTestCase):
"""
Test access to the course home page.
"""
def setUp(self):
super(TestCourseHomePageAccess, self).setUp()
# Make this a verified course so that an upgrade message might be shown
add_course_mode(self.course, upgrade_deadline_expired=False)
# Add a welcome message
create_course_update(self.course, self.staff_user, TEST_WELCOME_MESSAGE)
def tearDown(self):
remove_course_updates(self.staff_user, self.course)
super(TestCourseHomePageAccess, self).tearDown()
@override_waffle_flag(SHOW_REVIEWS_TOOL_FLAG, active=True)
@ddt.data(
[False, COURSE_VISIBILITY_PRIVATE, CourseUserType.ANONYMOUS, True, False],
[False, COURSE_VISIBILITY_PUBLIC_OUTLINE, CourseUserType.ANONYMOUS, True, False],
[False, COURSE_VISIBILITY_PUBLIC, CourseUserType.ANONYMOUS, True, False],
[True, COURSE_VISIBILITY_PRIVATE, CourseUserType.ANONYMOUS, True, False],
[True, COURSE_VISIBILITY_PUBLIC_OUTLINE, CourseUserType.ANONYMOUS, True, True],
[True, COURSE_VISIBILITY_PUBLIC, CourseUserType.ANONYMOUS, True, True],
[False, COURSE_VISIBILITY_PRIVATE, CourseUserType.UNENROLLED, True, False],
[False, COURSE_VISIBILITY_PUBLIC_OUTLINE, CourseUserType.UNENROLLED, True, False],
[False, COURSE_VISIBILITY_PUBLIC, CourseUserType.UNENROLLED, True, False],
[True, COURSE_VISIBILITY_PRIVATE, CourseUserType.UNENROLLED, True, False],
[True, COURSE_VISIBILITY_PUBLIC_OUTLINE, CourseUserType.UNENROLLED, True, True],
[True, COURSE_VISIBILITY_PUBLIC, CourseUserType.UNENROLLED, True, True],
[False, COURSE_VISIBILITY_PRIVATE, CourseUserType.ENROLLED, False, True],
[True, COURSE_VISIBILITY_PRIVATE, CourseUserType.ENROLLED, False, True],
[True, COURSE_VISIBILITY_PUBLIC_OUTLINE, CourseUserType.ENROLLED, False, True],
[True, COURSE_VISIBILITY_PUBLIC, CourseUserType.ENROLLED, False, True],
[False, COURSE_VISIBILITY_PRIVATE, CourseUserType.UNENROLLED_STAFF, True, True],
[True, COURSE_VISIBILITY_PRIVATE, CourseUserType.UNENROLLED_STAFF, True, True],
[True, COURSE_VISIBILITY_PUBLIC_OUTLINE, CourseUserType.UNENROLLED_STAFF, True, True],
[True, COURSE_VISIBILITY_PUBLIC, CourseUserType.UNENROLLED_STAFF, True, True],
[False, COURSE_VISIBILITY_PRIVATE, CourseUserType.GLOBAL_STAFF, True, True],
[True, COURSE_VISIBILITY_PRIVATE, CourseUserType.GLOBAL_STAFF, True, True],
[True, COURSE_VISIBILITY_PUBLIC_OUTLINE, CourseUserType.GLOBAL_STAFF, True, True],
[True, COURSE_VISIBILITY_PUBLIC, CourseUserType.GLOBAL_STAFF, True, True],
)
@ddt.unpack
def test_home_page(
self, enable_unenrolled_access, course_visibility, user_type,
expected_enroll_message, expected_course_outline,
):
self.create_user_for_course(self.course, user_type)
# Render the course home page
with mock.patch('xmodule.course_module.CourseDescriptor.course_visibility', course_visibility):
# Test access with anonymous flag and course visibility
with override_waffle_flag(COURSE_ENABLE_UNENROLLED_ACCESS_FLAG, enable_unenrolled_access):
url = course_home_url(self.course)
response = self.client.get(url)
# Verify that the course tools and dates are always shown
self.assertContains(response, TEST_COURSE_TOOLS)
self.assertContains(response, TEST_COURSE_TODAY)
is_anonymous = user_type is CourseUserType.ANONYMOUS
is_enrolled = user_type is CourseUserType.ENROLLED
is_enrolled_or_staff = is_enrolled or user_type in (
CourseUserType.UNENROLLED_STAFF, CourseUserType.GLOBAL_STAFF
)
self.assertContains(response, 'Learn About Verified Certificate', count=(1 if is_enrolled else 0))
# Verify that start button, course sock, and welcome message
# are only shown to enrolled users or staff.
self.assertContains(response, 'Start Course', count=(1 if is_enrolled_or_staff else 0))
self.assertContains(response, TEST_WELCOME_MESSAGE, count=(1 if is_enrolled_or_staff else 0))
# Verify the outline is shown to enrolled users, unenrolled_staff and anonymous users if allowed
self.assertContains(response, TEST_CHAPTER_NAME, count=(1 if expected_course_outline else 0))
# Verify that the expected message is shown to the user
if not enable_unenrolled_access or course_visibility != COURSE_VISIBILITY_PUBLIC:
self.assertContains(
response, 'To see course content', count=(1 if is_anonymous else 0)
)
self.assertContains(response, '<div class="user-messages"', count=(1 if expected_enroll_message else 0))
if expected_enroll_message:
self.assertContains(response, 'You must be enrolled in the course to see course content.')
@override_waffle_flag(UNIFIED_COURSE_TAB_FLAG, active=False)
@override_waffle_flag(SHOW_REVIEWS_TOOL_FLAG, active=True)
@ddt.data(
[CourseUserType.ANONYMOUS, 'To see course content'],
[CourseUserType.ENROLLED, None],
[CourseUserType.UNENROLLED, 'You must be enrolled in the course to see course content.'],
[CourseUserType.UNENROLLED_STAFF, 'You must be enrolled in the course to see course content.'],
)
@ddt.unpack
def test_home_page_not_unified(self, user_type, expected_message):
"""
Verifies the course home tab when not unified.
"""
self.create_user_for_course(self.course, user_type)
# Render the course home page
url = course_home_url(self.course)
response = self.client.get(url)
# Verify that the course tools and dates are always shown
self.assertContains(response, TEST_COURSE_TOOLS)
self.assertContains(response, TEST_COURSE_TODAY)
# Verify that welcome messages are never shown
self.assertNotContains(response, TEST_WELCOME_MESSAGE)
# Verify that the outline, start button, course sock, and welcome message
# are only shown to enrolled users.
is_enrolled = user_type is CourseUserType.ENROLLED
is_unenrolled_staff = user_type is CourseUserType.UNENROLLED_STAFF
expected_count = 1 if (is_enrolled or is_unenrolled_staff) else 0
self.assertContains(response, TEST_CHAPTER_NAME, count=expected_count)
self.assertContains(response, 'Start Course', count=expected_count)
self.assertContains(response, 'Learn About Verified Certificate', count=(1 if is_enrolled else 0))
# Verify that the expected message is shown to the user
self.assertContains(response, '<div class="user-messages"', count=1 if expected_message else 0)
if expected_message:
self.assertContains(response, expected_message)
def test_sign_in_button(self):
"""
Verify that the sign in button will return to this page.
"""
url = course_home_url(self.course)
response = self.client.get(url)
self.assertContains(response, '/login?next={url}'.format(url=urlquote_plus(url)))
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
def test_non_live_course(self):
"""
Ensure that a user accessing a non-live course sees a redirect to
the student dashboard, not a 404.
"""
future_course = self.create_future_course()
self.create_user_for_course(future_course, CourseUserType.ENROLLED)
url = course_home_url(future_course)
response = self.client.get(url)
start_date = strftime_localized(future_course.start, 'SHORT_DATE')
expected_params = QueryDict(mutable=True)
expected_params['notlive'] = start_date
expected_url = '{url}?{params}'.format(
url=reverse('dashboard'),
params=expected_params.urlencode()
)
self.assertRedirects(response, expected_url)
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
def test_course_does_not_expire_for_verified_user(self):
"""
There are a number of different roles/users that should not lose access after the expiration date.
Ensure that users who should not lose access get a 200 (ok) response
        when attempting to visit the course after their would-be expiration date.
"""
course = CourseFactory.create(start=THREE_YEARS_AGO)
url = course_home_url(course)
user = UserFactory.create(password=self.TEST_PASSWORD)
ScheduleFactory(
start=THREE_YEARS_AGO,
enrollment__mode=CourseMode.VERIFIED,
enrollment__course_id=course.id,
enrollment__user=user
)
        # ensure that the user has indefinite access
self.client.login(username=user.username, password=self.TEST_PASSWORD)
response = self.client.get(url)
self.assertEqual(
response.status_code,
200,
"Should not expire access for user",
)
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
@ddt.data(
InstructorFactory,
StaffFactory,
BetaTesterFactory,
OrgStaffFactory,
OrgInstructorFactory,
)
def test_course_does_not_expire_for_course_staff(self, role_factory):
"""
There are a number of different roles/users that should not lose access after the expiration date.
Ensure that users who should not lose access get a 200 (ok) response
        when attempting to visit the course after their would-be expiration date.
"""
course = CourseFactory.create(start=THREE_YEARS_AGO)
url = course_home_url(course)
user = role_factory.create(password=self.TEST_PASSWORD, course_key=course.id)
ScheduleFactory(
start=THREE_YEARS_AGO,
enrollment__mode=CourseMode.AUDIT,
enrollment__course_id=course.id,
enrollment__user=user
)
# ensure that the user has indefinite access
self.client.login(username=user.username, password=self.TEST_PASSWORD)
response = self.client.get(url)
self.assertEqual(
response.status_code,
200,
"Should not expire access for user",
)
@ddt.data(
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_ADMINISTRATOR
)
def test_course_does_not_expire_for_user_with_course_role(self, role_name):
"""
Test that users with the above roles for a course do not lose access
"""
course = CourseFactory.create(start=THREE_YEARS_AGO)
url = course_home_url(course)
user = UserFactory.create()
role = RoleFactory(name=role_name, course_id=course.id)
role.users.add(user)
# ensure the user has indefinite access
self.client.login(username=user.username, password=self.TEST_PASSWORD)
response = self.client.get(url)
self.assertEqual(
response.status_code,
200,
"Should not expire access for user"
)
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
@ddt.data(
GlobalStaffFactory,
)
def test_course_does_not_expire_for_global_users(self, role_factory):
"""
There are a number of different roles/users that should not lose access after the expiration date.
Ensure that users who should not lose access get a 200 (ok) response
        when attempting to visit the course after their would-be expiration date.
"""
course = CourseFactory.create(start=THREE_YEARS_AGO)
url = course_home_url(course)
user = role_factory.create(password=self.TEST_PASSWORD)
ScheduleFactory(
start=THREE_YEARS_AGO,
enrollment__mode=CourseMode.AUDIT,
enrollment__course_id=course.id,
enrollment__user=user
)
        # ensure that the user has indefinite access
self.client.login(username=user.username, password=self.TEST_PASSWORD)
response = self.client.get(url)
self.assertEqual(
response.status_code,
200,
"Should not expire access for user",
)
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
def test_expired_course(self):
"""
Ensure that a user accessing an expired course sees a redirect to
the student dashboard, not a 404.
"""
CourseDurationLimitConfig.objects.create(enabled=True, enabled_as_of=datetime(2010, 1, 1))
course = CourseFactory.create(start=THREE_YEARS_AGO)
url = course_home_url(course)
for mode in [CourseMode.AUDIT, CourseMode.VERIFIED]:
CourseModeFactory.create(course_id=course.id, mode_slug=mode)
        # assert that if an expired audit user tries to access the course they are redirected to the dashboard
audit_user = UserFactory(password=self.TEST_PASSWORD)
self.client.login(username=audit_user.username, password=self.TEST_PASSWORD)
audit_enrollment = CourseEnrollment.enroll(audit_user, course.id, mode=CourseMode.AUDIT)
ScheduleFactory(start=THREE_YEARS_AGO, enrollment=audit_enrollment)
response = self.client.get(url)
expiration_date = strftime_localized(course.start + timedelta(weeks=4), '%b. %-d, %Y')
expected_params = QueryDict(mutable=True)
course_name = CourseOverview.get_from_id(course.id).display_name_with_default
expected_params['access_response_error'] = 'Access to {run} expired on {expiration_date}'.format(
run=course_name,
expiration_date=expiration_date
)
expected_url = '{url}?{params}'.format(
url=reverse('dashboard'),
params=expected_params.urlencode()
)
self.assertRedirects(response, expected_url)
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
def test_expiration_banner_with_expired_upgrade_deadline(self):
"""
Ensure that a user accessing a course with an expired upgrade deadline
will still see the course expiration banner without the upgrade related text.
"""
past = datetime(2010, 1, 1)
CourseDurationLimitConfig.objects.create(enabled=True, enabled_as_of=past)
course = CourseFactory.create(start=now() - timedelta(days=10))
CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.VERIFIED, expiration_datetime=past)
user = UserFactory(password=self.TEST_PASSWORD)
self.client.login(username=user.username, password=self.TEST_PASSWORD)
CourseEnrollment.enroll(user, course.id, mode=CourseMode.AUDIT)
url = course_home_url(course)
response = self.client.get(url)
bannerText = get_expiration_banner_text(user, course)
self.assertContains(response, bannerText, html=True)
self.assertContains(response, TEST_BANNER_CLASS)
def test_audit_only_not_expired(self):
"""
Verify that enrolled users are NOT shown the course expiration banner and can
access the course home page if course audit only
"""
CourseDurationLimitConfig.objects.create(enabled=True, enabled_as_of=datetime(2010, 1, 1))
audit_only_course = CourseFactory.create()
self.create_user_for_course(audit_only_course, CourseUserType.ENROLLED)
response = self.client.get(course_home_url(audit_only_course))
self.assertEqual(response.status_code, 200)
self.assertContains(response, TEST_COURSE_TOOLS)
self.assertContains(response, TEST_COURSE_TODAY)
self.assertNotContains(response, TEST_BANNER_CLASS)
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
def test_expired_course_in_holdback(self):
"""
        Ensure that a user accessing an expired course that is in the holdback
        is not redirected to the student dashboard and instead gets a 200
        response for the course home page.
"""
CourseDurationLimitConfig.objects.create(enabled=True, enabled_as_of=datetime(2010, 1, 1))
course = CourseFactory.create(start=THREE_YEARS_AGO)
url = course_home_url(course)
for mode in [CourseMode.AUDIT, CourseMode.VERIFIED]:
CourseModeFactory.create(course_id=course.id, mode_slug=mode)
ExperimentKeyValue.objects.create(
experiment_id=EXPERIMENT_ID,
key="content_type_gating_holdback_percentage",
value="100"
)
        # assert that if an expired audit user in the holdback tries to access the course
# they are not redirected to the dashboard
audit_user = UserFactory(password=self.TEST_PASSWORD)
self.client.login(username=audit_user.username, password=self.TEST_PASSWORD)
audit_enrollment = CourseEnrollment.enroll(audit_user, course.id, mode=CourseMode.AUDIT)
ScheduleFactory(start=THREE_YEARS_AGO, enrollment=audit_enrollment)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@mock.patch.dict(settings.FEATURES, {'DISABLE_START_DATES': False})
@mock.patch("util.date_utils.strftime_localized")
def test_non_live_course_other_language(self, mock_strftime_localized):
"""
Ensure that a user accessing a non-live course sees a redirect to
the student dashboard, not a 404, even if the localized date is unicode
"""
future_course = self.create_future_course()
self.create_user_for_course(future_course, CourseUserType.ENROLLED)
fake_unicode_start_time = u"üñîçø∂é_ßtå®t_tîµé"
mock_strftime_localized.return_value = fake_unicode_start_time
url = course_home_url(future_course)
response = self.client.get(url)
expected_params = QueryDict(mutable=True)
expected_params['notlive'] = fake_unicode_start_time
expected_url = u'{url}?{params}'.format(
url=reverse('dashboard'),
params=expected_params.urlencode()
)
self.assertRedirects(response, expected_url)
def test_nonexistent_course(self):
"""
Ensure a non-existent course results in a 404.
"""
self.create_user_for_course(self.course, CourseUserType.ANONYMOUS)
url = course_home_url_from_string('not/a/course')
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
@override_waffle_flag(COURSE_PRE_START_ACCESS_FLAG, active=True)
def test_course_messaging(self):
"""
        Ensure that the following five use cases work as expected
1) Anonymous users are shown a course message linking them to the login page
2) Unenrolled users are shown a course message allowing them to enroll
3) Enrolled users who show up on the course page after the course has begun
are not shown a course message.
4) Enrolled users who show up on the course page after the course has begun will
see the course expiration banner if course duration limits are on for the course.
5) Enrolled users who show up on the course page before the course begins
are shown a message explaining when the course starts as well as a call to
action button that allows them to add a calendar event.
"""
# Verify that anonymous users are shown a login link in the course message
url = course_home_url(self.course)
response = self.client.get(url)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE_ANONYMOUS)
# Verify that unenrolled users are shown an enroll call to action message
user = self.create_user_for_course(self.course, CourseUserType.UNENROLLED)
url = course_home_url(self.course)
response = self.client.get(url)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE_UNENROLLED)
# Verify that enrolled users are not shown any state warning message when enrolled and course has begun.
CourseEnrollment.enroll(user, self.course.id)
url = course_home_url(self.course)
response = self.client.get(url)
self.assertNotContains(response, TEST_COURSE_HOME_MESSAGE_ANONYMOUS)
self.assertNotContains(response, TEST_COURSE_HOME_MESSAGE_UNENROLLED)
self.assertNotContains(response, TEST_COURSE_HOME_MESSAGE_PRE_START)
# Verify that enrolled users are shown the course expiration banner if content gating is enabled
# We use .save() explicitly here (rather than .objects.create) in order to force the
# cache to refresh.
config = CourseDurationLimitConfig(
course=CourseOverview.get_from_id(self.course.id),
enabled=True,
enabled_as_of=datetime(2018, 1, 1)
)
config.save()
url = course_home_url(self.course)
response = self.client.get(url)
bannerText = get_expiration_banner_text(user, self.course)
self.assertContains(response, bannerText, html=True)
# Verify that enrolled users are not shown the course expiration banner if content gating is disabled
config.enabled = False
config.save()
url = course_home_url(self.course)
response = self.client.get(url)
bannerText = get_expiration_banner_text(user, self.course)
self.assertNotContains(response, bannerText, html=True)
# Verify that enrolled users are shown 'days until start' message before start date
future_course = self.create_future_course()
CourseEnrollment.enroll(user, future_course.id)
url = course_home_url(future_course)
response = self.client.get(url)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE)
self.assertContains(response, TEST_COURSE_HOME_MESSAGE_PRE_START)
def test_course_messaging_for_staff(self):
"""
Staff users will not see the expiration banner when course duration limits
are on for the course.
"""
config = CourseDurationLimitConfig(
course=CourseOverview.get_from_id(self.course.id),
enabled=True,
enabled_as_of=datetime(2018, 1, 1)
)
config.save()
url = course_home_url(self.course)
CourseEnrollment.enroll(self.staff_user, self.course.id)
response = self.client.get(url)
bannerText = get_expiration_banner_text(self.staff_user, self.course)
self.assertNotContains(response, bannerText, html=True)
@override_waffle_flag(COURSE_PRE_START_ACCESS_FLAG, active=True)
@override_waffle_flag(ENABLE_COURSE_GOALS, active=True)
def test_course_goals(self):
"""
Ensure that the following five use cases work as expected.
1) Unenrolled users are not shown the set course goal message.
2) Enrolled users are shown the set course goal message if they have not yet set a course goal.
3) Enrolled users are not shown the set course goal message if they have set a course goal.
4) Enrolled and verified users are not shown the set course goal message.
5) Enrolled users are not shown the set course goal message in a course that cannot be verified.
"""
# Create a course with a verified track.
verifiable_course = CourseFactory.create()
add_course_mode(verifiable_course, upgrade_deadline_expired=False)
# Verify that unenrolled users are not shown the set course goal message.
user = self.create_user_for_course(verifiable_course, CourseUserType.UNENROLLED)
response = self.client.get(course_home_url(verifiable_course))
self.assertNotContains(response, TEST_COURSE_GOAL_OPTIONS)
# Verify that enrolled users are shown the set course goal message in a verified course.
CourseEnrollment.enroll(user, verifiable_course.id)
response = self.client.get(course_home_url(verifiable_course))
self.assertContains(response, TEST_COURSE_GOAL_OPTIONS)
# Verify that enrolled users that have set a course goal are not shown the set course goal message.
add_course_goal(user, verifiable_course.id, COURSE_GOAL_DISMISS_OPTION)
response = self.client.get(course_home_url(verifiable_course))
self.assertNotContains(response, TEST_COURSE_GOAL_OPTIONS)
# Verify that enrolled and verified users are not shown the set course goal message.
remove_course_goal(user, str(verifiable_course.id))
CourseEnrollment.enroll(user, verifiable_course.id, CourseMode.VERIFIED)
response = self.client.get(course_home_url(verifiable_course))
self.assertNotContains(response, TEST_COURSE_GOAL_OPTIONS)
# Verify that enrolled users are not shown the set course goal message in an audit only course.
audit_only_course = CourseFactory.create()
CourseEnrollment.enroll(user, audit_only_course.id)
response = self.client.get(course_home_url(audit_only_course))
self.assertNotContains(response, TEST_COURSE_GOAL_OPTIONS)
@override_waffle_flag(COURSE_PRE_START_ACCESS_FLAG, active=True)
@override_waffle_flag(ENABLE_COURSE_GOALS, active=True)
def test_course_goal_updates(self):
"""
        Ensure that the following four use cases work as expected.
1) Unenrolled users are not shown the update goal selection field.
2) Enrolled users are not shown the update goal selection field if they have not yet set a course goal.
3) Enrolled users are shown the update goal selection field if they have set a course goal.
4) Enrolled users in the verified track are shown the update goal selection field.
"""
# Create a course with a verified track.
verifiable_course = CourseFactory.create()
add_course_mode(verifiable_course, upgrade_deadline_expired=False)
# Verify that unenrolled users are not shown the update goal selection field.
user = self.create_user_for_course(verifiable_course, CourseUserType.UNENROLLED)
response = self.client.get(course_home_url(verifiable_course))
self.assertNotContains(response, TEST_COURSE_GOAL_UPDATE_FIELD)
# Verify that enrolled users that have not set a course goal are shown a hidden update goal selection field.
enrollment = CourseEnrollment.enroll(user, verifiable_course.id)
response = self.client.get(course_home_url(verifiable_course))
self.assertContains(response, TEST_COURSE_GOAL_UPDATE_FIELD_HIDDEN)
# Verify that enrolled users that have set a course goal are shown a visible update goal selection field.
add_course_goal(user, verifiable_course.id, COURSE_GOAL_DISMISS_OPTION)
response = self.client.get(course_home_url(verifiable_course))
self.assertContains(response, TEST_COURSE_GOAL_UPDATE_FIELD)
self.assertNotContains(response, TEST_COURSE_GOAL_UPDATE_FIELD_HIDDEN)
# Verify that enrolled and verified users are shown the update goal selection
CourseEnrollment.update_enrollment(enrollment, is_active=True, mode=CourseMode.VERIFIED)
response = self.client.get(course_home_url(verifiable_course))
self.assertContains(response, TEST_COURSE_GOAL_UPDATE_FIELD)
self.assertNotContains(response, TEST_COURSE_GOAL_UPDATE_FIELD_HIDDEN)
class CourseHomeFragmentViewTests(ModuleStoreTestCase):
"""
Test Messages Displayed on the Course Home
"""
CREATE_USER = False
def setUp(self):
super(CourseHomeFragmentViewTests, self).setUp()
CommerceConfiguration.objects.create(checkout_on_ecommerce_service=True)
end = now() + timedelta(days=30)
self.course = CourseFactory(
start=now() - timedelta(days=30),
end=end,
)
self.url = course_home_url(self.course)
CourseMode.objects.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
self.verified_mode = CourseMode.objects.create(
course_id=self.course.id,
mode_slug=CourseMode.VERIFIED,
min_price=100,
expiration_datetime=end,
sku='test'
)
self.user = UserFactory()
self.client.login(username=self.user.username, password=TEST_PASSWORD)
name = SHOW_UPGRADE_MSG_ON_COURSE_HOME.waffle_namespace._namespaced_name(
SHOW_UPGRADE_MSG_ON_COURSE_HOME.flag_name)
self.flag, __ = Flag.objects.update_or_create(name=name, defaults={'everyone': True})
def assert_upgrade_message_not_displayed(self):
response = self.client.get(self.url)
self.assertNotIn('section-upgrade', response.content)
def assert_upgrade_message_displayed(self):
response = self.client.get(self.url)
self.assertIn('section-upgrade', response.content)
url = EcommerceService().get_checkout_page_url(self.verified_mode.sku)
self.assertIn('<a class="btn-brand btn-upgrade"', response.content)
self.assertIn(url, response.content)
self.assertIn('Upgrade (${price})'.format(price=self.verified_mode.min_price), response.content)
def test_no_upgrade_message_if_logged_out(self):
self.client.logout()
self.assert_upgrade_message_not_displayed()
def test_no_upgrade_message_if_not_enrolled(self):
self.assertEqual(len(CourseEnrollment.enrollments_for_user(self.user)), 0)
self.assert_upgrade_message_not_displayed()
def test_no_upgrade_message_if_verified_track(self):
CourseEnrollment.enroll(self.user, self.course.id, CourseMode.VERIFIED)
self.assert_upgrade_message_not_displayed()
def test_no_upgrade_message_if_upgrade_deadline_passed(self):
self.verified_mode.expiration_datetime = now() - timedelta(days=20)
self.verified_mode.save()
self.assert_upgrade_message_not_displayed()
def test_no_upgrade_message_if_flag_disabled(self):
self.flag.everyone = False
self.flag.save()
CourseEnrollment.enroll(self.user, self.course.id, CourseMode.AUDIT)
self.assert_upgrade_message_not_displayed()
def test_display_upgrade_message_if_audit_and_deadline_not_passed(self):
CourseEnrollment.enroll(self.user, self.course.id, CourseMode.AUDIT)
self.assert_upgrade_message_displayed()
|
philanthropy-u/edx-platform
|
openedx/features/course_experience/tests/views/test_course_home.py
|
Python
|
agpl-3.0
| 39,942
|
[
"VisIt"
] |
f2a5839966e46be4b638ceffeddac7f5d5429504efee96525e2da7a209ddbe08
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs pose embedding model inference.
Currently, we support loading model inputs from a CSV file. The CSV file is
expected to have:
1. The first row as header, including the following values:
image/width,
image/height,
image/object/part/NOSE_TIP/center/x,
image/object/part/NOSE_TIP/center/y,
image/object/part/NOSE_TIP/score,
image/object/part/LEFT_SHOULDER/center/x,
image/object/part/LEFT_SHOULDER/center/y,
image/object/part/LEFT_SHOULDER/score,
image/object/part/RIGHT_SHOULDER/center/x,
image/object/part/RIGHT_SHOULDER/center/y,
image/object/part/RIGHT_SHOULDER/score,
image/object/part/LEFT_ELBOW/center/x,
image/object/part/LEFT_ELBOW/center/y,
image/object/part/LEFT_ELBOW/score,
image/object/part/RIGHT_ELBOW/center/x,
image/object/part/RIGHT_ELBOW/center/y,
image/object/part/RIGHT_ELBOW/score,
image/object/part/LEFT_WRIST/center/x,
image/object/part/LEFT_WRIST/center/y,
image/object/part/LEFT_WRIST/score,
image/object/part/RIGHT_WRIST/center/x,
image/object/part/RIGHT_WRIST/center/y,
image/object/part/RIGHT_WRIST/score,
image/object/part/LEFT_HIP/center/x,
image/object/part/LEFT_HIP/center/y,
image/object/part/LEFT_HIP/score,
image/object/part/RIGHT_HIP/center/x,
image/object/part/RIGHT_HIP/center/y,
image/object/part/RIGHT_HIP/score,
image/object/part/LEFT_KNEE/center/x,
image/object/part/LEFT_KNEE/center/y,
image/object/part/LEFT_KNEE/score,
image/object/part/RIGHT_KNEE/center/x,
image/object/part/RIGHT_KNEE/center/y,
image/object/part/RIGHT_KNEE/score,
image/object/part/LEFT_ANKLE/center/x,
image/object/part/LEFT_ANKLE/center/y,
image/object/part/LEFT_ANKLE/score,
image/object/part/RIGHT_ANKLE/center/x,
image/object/part/RIGHT_ANKLE/center/y,
image/object/part/RIGHT_ANKLE/score
2. The following rows are CSVs according to the header, one sample per row.
Note: The input 2D keypoint coordinate values are required to be normalized by
image sizes to within [0, 1].
The outputs will be written to `output_dir` in the format of CSV, with file
base names being the corresponding tensor keys, such as
`unnormalized_embeddings.csv`, `embedding_stddevs.csv`, etc.
In an output CSV file, each row corresponds to an input sample (the same row in
the input CSV file).
"""
import os
from absl import app
from absl import flags
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
from poem.core import common
from poem.core import input_generator
from poem.core import keypoint_profiles
from poem.core import keypoint_utils
from poem.core import models
from poem.core import pipeline_utils
tf.disable_v2_behavior()
FLAGS = flags.FLAGS
flags.adopt_module_key_flags(common)
flags.DEFINE_string('input_csv', None, 'Path to input CSV file.')
flags.mark_flag_as_required('input_csv')
flags.DEFINE_string('output_dir', None, 'Path to output directory.')
flags.mark_flag_as_required('output_dir')
flags.DEFINE_string(
'input_keypoint_profile_name_2d', 'LEGACY_2DCOCO13',
'Profile name for 2D keypoints from input sources. Use None to ignore input'
' 2D keypoints.')
flags.DEFINE_string('model_input_keypoint_mask_type', 'NO_USE',
'Usage type of model input keypoint masks.')
flags.DEFINE_float(
'min_input_keypoint_score_2d', -1.0,
'Minimum threshold for input keypoint score binarization. Use negative '
'value to ignore. Only used if 2D keypoint masks are used.')
# See `common.SUPPORTED_EMBEDDING_TYPES`.
flags.DEFINE_string('embedding_type', 'GAUSSIAN', 'Type of embeddings.')
flags.DEFINE_integer('embedding_size', 16, 'Size of predicted embeddings.')
flags.DEFINE_integer(
'num_embedding_components', 1,
'Number of embedding components, e.g., the number of Gaussians in mixture.')
flags.DEFINE_integer('num_embedding_samples', 20,
'Number of samples from embedding distributions.')
# See `common.SUPPORTED_BASE_MODEL_TYPES`.
flags.DEFINE_string('base_model_type', 'SIMPLE', 'Type of base model.')
flags.DEFINE_integer('num_fc_blocks', 2, 'Number of fully connected blocks.')
flags.DEFINE_integer('num_fcs_per_block', 2,
'Number of fully connected layers per block.')
flags.DEFINE_integer('num_hidden_nodes', 1024,
'Number of nodes in each hidden fully connected layer.')
flags.DEFINE_integer(
'num_bottleneck_nodes', 0,
'Number of nodes in the bottleneck layer before the output layer(s). '
'Ignored if non-positive.')
flags.DEFINE_float(
'weight_max_norm', 0.0,
'Maximum norm of fully connected layer weights. Only used if positive.')
flags.DEFINE_string('checkpoint_path', None,
'Path to checkpoint to initialize from.')
flags.mark_flag_as_required('checkpoint_path')
flags.DEFINE_bool('use_moving_average', True,
'Whether to use exponential moving average.')
flags.DEFINE_string('master', '', 'BNS name of the TensorFlow master to use.')
def read_inputs(keypoint_profile_2d):
"""Reads model inputs."""
keypoints_2d_col_names, keypoint_scores_2d_col_names = [], []
for keypoint_name in keypoint_profile_2d.keypoint_names:
keypoints_2d_col_names.append(
(common.TFE_KEY_PREFIX_KEYPOINT_2D + keypoint_name +
common.TFE_KEY_SUFFIX_KEYPOINT_2D[0]))
keypoints_2d_col_names.append(
(common.TFE_KEY_PREFIX_KEYPOINT_2D + keypoint_name +
common.TFE_KEY_SUFFIX_KEYPOINT_2D[1]))
keypoint_scores_2d_col_names.append(
(common.TFE_KEY_PREFIX_KEYPOINT_2D + keypoint_name +
common.TFE_KEY_SUFFIX_KEYPOINT_SCORE))
with tf.gfile.GFile(FLAGS.input_csv, 'r') as f:
data = pd.read_csv(
f,
usecols=([common.TFE_KEY_IMAGE_HEIGHT, common.TFE_KEY_IMAGE_WIDTH] +
keypoints_2d_col_names + keypoint_scores_2d_col_names))
image_sizes = tf.constant(
data[[common.TFE_KEY_IMAGE_HEIGHT,
common.TFE_KEY_IMAGE_WIDTH]].to_numpy(dtype=np.float32))
keypoints_2d = tf.constant(
data[keypoints_2d_col_names].to_numpy(dtype=np.float32))
keypoint_scores_2d = tf.constant(
data[keypoint_scores_2d_col_names].to_numpy(dtype=np.float32))
keypoints_2d = tf.reshape(
keypoints_2d,
[-1, keypoint_profile_2d.keypoint_num, keypoint_profile_2d.keypoint_dim])
keypoints_2d = keypoint_utils.denormalize_points_by_image_size(
keypoints_2d, image_sizes=image_sizes)
if FLAGS.min_input_keypoint_score_2d < 0.0:
keypoint_masks_2d = tf.ones_like(keypoint_scores_2d, dtype=tf.float32)
else:
keypoint_masks_2d = tf.cast(
tf.math.greater_equal(keypoint_scores_2d,
FLAGS.min_input_keypoint_score_2d),
dtype=tf.float32)
return keypoints_2d, keypoint_masks_2d
def main(_):
"""Runs inference."""
keypoint_profile_2d = (
keypoint_profiles.create_keypoint_profile_or_die(
FLAGS.input_keypoint_profile_name_2d))
g = tf.Graph()
with g.as_default():
keypoints_2d, keypoint_masks_2d = read_inputs(keypoint_profile_2d)
model_inputs, _ = input_generator.create_model_input(
keypoints_2d,
keypoint_masks_2d=keypoint_masks_2d,
keypoints_3d=None,
model_input_keypoint_type=common.MODEL_INPUT_KEYPOINT_TYPE_2D_INPUT,
model_input_keypoint_mask_type=FLAGS.model_input_keypoint_mask_type,
keypoint_profile_2d=keypoint_profile_2d,
# Fix seed for determinism.
seed=1)
embedder_fn = models.get_embedder(
base_model_type=FLAGS.base_model_type,
embedding_type=FLAGS.embedding_type,
num_embedding_components=FLAGS.num_embedding_components,
embedding_size=FLAGS.embedding_size,
num_embedding_samples=FLAGS.num_embedding_samples,
is_training=False,
num_fc_blocks=FLAGS.num_fc_blocks,
num_fcs_per_block=FLAGS.num_fcs_per_block,
num_hidden_nodes=FLAGS.num_hidden_nodes,
num_bottleneck_nodes=FLAGS.num_bottleneck_nodes,
weight_max_norm=FLAGS.weight_max_norm)
outputs, _ = embedder_fn(model_inputs)
if FLAGS.use_moving_average:
variables_to_restore = (
pipeline_utils.get_moving_average_variables_to_restore())
saver = tf.train.Saver(variables_to_restore)
else:
saver = tf.train.Saver()
scaffold = tf.train.Scaffold(
init_op=tf.global_variables_initializer(), saver=saver)
session_creator = tf.train.ChiefSessionCreator(
scaffold=scaffold,
master=FLAGS.master,
checkpoint_filename_with_path=FLAGS.checkpoint_path)
with tf.train.MonitoredSession(
session_creator=session_creator, hooks=None) as sess:
outputs_result = sess.run(outputs)
tf.gfile.MakeDirs(FLAGS.output_dir)
for key in [
common.KEY_EMBEDDING_MEANS, common.KEY_EMBEDDING_STDDEVS,
common.KEY_EMBEDDING_SAMPLES
]:
if key in outputs_result:
output = outputs_result[key]
np.savetxt(
os.path.join(FLAGS.output_dir, key + '.csv'),
output.reshape([output.shape[0], -1]),
delimiter=',')
if __name__ == '__main__':
app.run(main)
|
google-research/google-research
|
poem/pr_vipe/infer.py
|
Python
|
apache-2.0
| 9,658
|
[
"Gaussian"
] |
71efe21d2c3479a30d975635de43968e30362a77da38ba0870ca798c61e5d87c
|
# mako/_ast_util.py
# Copyright 2006-2021 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
ast
~~~
This is a stripped down version of Armin Ronacher's ast module.
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import Add
from _ast import And
from _ast import AST
from _ast import BitAnd
from _ast import BitOr
from _ast import BitXor
from _ast import Div
from _ast import Eq
from _ast import FloorDiv
from _ast import Gt
from _ast import GtE
from _ast import If
from _ast import In
from _ast import Invert
from _ast import Is
from _ast import IsNot
from _ast import LShift
from _ast import Lt
from _ast import LtE
from _ast import Mod
from _ast import Mult
from _ast import Name
from _ast import Not
from _ast import NotEq
from _ast import NotIn
from _ast import Or
from _ast import PyCF_ONLY_AST
from _ast import RShift
from _ast import Sub
from _ast import UAdd
from _ast import USub
BOOLOP_SYMBOLS = {And: "and", Or: "or"}
BINOP_SYMBOLS = {
Add: "+",
Sub: "-",
Mult: "*",
Div: "/",
FloorDiv: "//",
Mod: "%",
LShift: "<<",
RShift: ">>",
BitOr: "|",
BitAnd: "&",
BitXor: "^",
}
CMPOP_SYMBOLS = {
Eq: "==",
Gt: ">",
GtE: ">=",
In: "in",
Is: "is",
IsNot: "is not",
Lt: "<",
LtE: "<=",
NotEq: "!=",
NotIn: "not in",
}
UNARYOP_SYMBOLS = {Invert: "~", Not: "not", UAdd: "+", USub: "-"}
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def parse(expr, filename="<unknown>", mode="exec"):
"""Parse an expression into an AST node."""
return compile(expr, filename, mode, PyCF_ONLY_AST)
def iter_fields(node):
"""Iterate over all fields of a node, only yielding existing fields."""
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
class NodeVisitor:
"""
    Walks the abstract syntax tree and calls visitor functions for every node
found. The visitor functions may return values which will be forwarded
by the `visit` method.
    By default the visitor function for a node is ``'visit_'`` + the
    class name of the node, so a `TryFinally` node's visit function would
    be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def get_visitor(self, node):
"""
Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = "visit_" + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node)
return self.generic_visit(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
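# Illustrative sketch (not part of the original module): a minimal
# NodeVisitor subclass showing the ``visit_`` + class-name dispatch
# described in the docstring above. Nodes without a matching method fall
# through to generic_visit, which keeps walking the tree.
class _NameCollector(NodeVisitor):
    def __init__(self):
        self.names = []

    def visit_Name(self, node):
        self.names.append(node.id)

# Assumed usage: c = _NameCollector(); c.visit(parse("x = y + z"))
# leaves c.names == ['x', 'y', 'z'].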
class NodeTransformer(NodeVisitor):
"""
Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
    Here is an example transformer that rewrites all `foo` to `data['foo']`::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes
you must either transform the child nodes yourself or call the generic
visit function for the node first.
Nodes that were part of a collection of statements (that applies to
all statement nodes) may also return a list of nodes rather than just
a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
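# Illustrative sketch (not part of the original module): an in-place
# transformer in the spirit of the docstring's RewriteName example. It
# renames every ``foo`` Name node to ``bar``; both identifiers are
# placeholder examples.
class _RenameFoo(NodeTransformer):
    def visit_Name(self, node):
        if node.id == "foo":
            node.id = "bar"
        # Returning the node keeps it in the tree (None would remove it).
        return node

# Assumed usage: tree = parse("foo + 1"); _RenameFoo().visit(tree)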
class SourceGenerator(NodeVisitor):
"""
    This visitor is able to transform a well-formed syntax tree into Python
    source code. For more details have a look at the docstring of the
`node_to_source` function.
"""
def __init__(self, indent_with):
self.result = []
self.indent_with = indent_with
self.indentation = 0
self.new_lines = 0
def write(self, x):
if self.new_lines:
if self.result:
self.result.append("\n" * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(x)
def newline(self, n=1):
self.new_lines = max(self.new_lines, n)
def body(self, statements):
        self.newline()
self.indentation += 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
def body_or_else(self, node):
self.body(node.body)
if node.orelse:
self.newline()
self.write("else:")
self.body(node.orelse)
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(", ")
else:
want_comma.append(True)
padding = [None] * (len(node.args) - len(node.defaults))
for arg, default in zip(node.args, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write("=")
self.visit(default)
if node.vararg is not None:
write_comma()
self.write("*" + node.vararg.arg)
if node.kwarg is not None:
write_comma()
self.write("**" + node.kwarg.arg)
def decorators(self, node):
for decorator in node.decorator_list:
self.newline()
self.write("@")
self.visit(decorator)
# Statements
def visit_Assign(self, node):
self.newline()
for idx, target in enumerate(node.targets):
if idx:
self.write(", ")
self.visit(target)
self.write(" = ")
self.visit(node.value)
def visit_AugAssign(self, node):
self.newline()
self.visit(node.target)
self.write(BINOP_SYMBOLS[type(node.op)] + "=")
self.visit(node.value)
def visit_ImportFrom(self, node):
self.newline()
self.write("from %s%s import " % ("." * node.level, node.module))
for idx, item in enumerate(node.names):
if idx:
self.write(", ")
self.write(item)
def visit_Import(self, node):
self.newline()
for item in node.names:
self.write("import ")
self.visit(item)
def visit_Expr(self, node):
self.newline()
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.newline(n=2)
self.decorators(node)
self.newline()
self.write("def %s(" % node.name)
self.signature(node.args)
self.write("):")
self.body(node.body)
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(", ")
else:
have_args.append(True)
self.write("(")
self.newline(n=3)
self.decorators(node)
self.newline()
self.write("class %s" % node.name)
for base in node.bases:
paren_or_comma()
self.visit(base)
# XXX: the if here is used to keep this module compatible
# with python 2.6.
if hasattr(node, "keywords"):
for keyword in node.keywords:
paren_or_comma()
self.write(keyword.arg + "=")
self.visit(keyword.value)
if getattr(node, "starargs", None):
paren_or_comma()
self.write("*")
self.visit(node.starargs)
if getattr(node, "kwargs", None):
paren_or_comma()
self.write("**")
self.visit(node.kwargs)
self.write(have_args and "):" or ":")
self.body(node.body)
def visit_If(self, node):
self.newline()
self.write("if ")
self.visit(node.test)
self.write(":")
self.body(node.body)
while True:
else_ = node.orelse
if len(else_) == 1 and isinstance(else_[0], If):
node = else_[0]
self.newline()
self.write("elif ")
self.visit(node.test)
self.write(":")
self.body(node.body)
else:
self.newline()
self.write("else:")
self.body(else_)
break
def visit_For(self, node):
self.newline()
self.write("for ")
self.visit(node.target)
self.write(" in ")
self.visit(node.iter)
self.write(":")
self.body_or_else(node)
def visit_While(self, node):
self.newline()
self.write("while ")
self.visit(node.test)
self.write(":")
self.body_or_else(node)
def visit_With(self, node):
self.newline()
self.write("with ")
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(" as ")
self.visit(node.optional_vars)
self.write(":")
self.body(node.body)
def visit_Pass(self, node):
self.newline()
self.write("pass")
def visit_Print(self, node):
# XXX: python 2.6 only
self.newline()
self.write("print ")
want_comma = False
if node.dest is not None:
self.write(" >> ")
self.visit(node.dest)
want_comma = True
for value in node.values:
if want_comma:
self.write(", ")
self.visit(value)
want_comma = True
if not node.nl:
self.write(",")
def visit_Delete(self, node):
self.newline()
self.write("del ")
        for idx, target in enumerate(node.targets):
if idx:
self.write(", ")
self.visit(target)
def visit_TryExcept(self, node):
self.newline()
self.write("try:")
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
def visit_TryFinally(self, node):
self.newline()
self.write("try:")
self.body(node.body)
self.newline()
self.write("finally:")
self.body(node.finalbody)
def visit_Global(self, node):
self.newline()
self.write("global " + ", ".join(node.names))
def visit_Nonlocal(self, node):
self.newline()
self.write("nonlocal " + ", ".join(node.names))
def visit_Return(self, node):
self.newline()
self.write("return ")
self.visit(node.value)
def visit_Break(self, node):
self.newline()
self.write("break")
def visit_Continue(self, node):
self.newline()
self.write("continue")
def visit_Raise(self, node):
# XXX: Python 2.6 / 3.0 compatibility
self.newline()
self.write("raise")
if hasattr(node, "exc") and node.exc is not None:
self.write(" ")
self.visit(node.exc)
if node.cause is not None:
self.write(" from ")
self.visit(node.cause)
elif hasattr(node, "type") and node.type is not None:
self.visit(node.type)
if node.inst is not None:
self.write(", ")
self.visit(node.inst)
if node.tback is not None:
self.write(", ")
self.visit(node.tback)
# Expressions
def visit_Attribute(self, node):
self.visit(node.value)
self.write("." + node.attr)
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(", ")
else:
want_comma.append(True)
self.visit(node.func)
self.write("(")
for arg in node.args:
write_comma()
self.visit(arg)
for keyword in node.keywords:
write_comma()
self.write(keyword.arg + "=")
self.visit(keyword.value)
if getattr(node, "starargs", None):
write_comma()
self.write("*")
self.visit(node.starargs)
if getattr(node, "kwargs", None):
write_comma()
self.write("**")
self.visit(node.kwargs)
self.write(")")
def visit_Name(self, node):
self.write(node.id)
def visit_NameConstant(self, node):
self.write(str(node.value))
def visit_arg(self, node):
self.write(node.arg)
def visit_Str(self, node):
self.write(repr(node.s))
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_Num(self, node):
self.write(repr(node.n))
# newly needed in Python 3.8
def visit_Constant(self, node):
self.write(repr(node.value))
def visit_Tuple(self, node):
self.write("(")
idx = -1
for idx, item in enumerate(node.elts):
if idx:
self.write(", ")
self.visit(item)
self.write(idx and ")" or ",)")
def sequence_visit(left, right):
def visit(self, node):
self.write(left)
for idx, item in enumerate(node.elts):
if idx:
self.write(", ")
self.visit(item)
self.write(right)
return visit
visit_List = sequence_visit("[", "]")
visit_Set = sequence_visit("{", "}")
del sequence_visit
def visit_Dict(self, node):
self.write("{")
for idx, (key, value) in enumerate(zip(node.keys, node.values)):
if idx:
self.write(", ")
self.visit(key)
self.write(": ")
self.visit(value)
self.write("}")
def visit_BinOp(self, node):
self.write("(")
self.visit(node.left)
self.write(" %s " % BINOP_SYMBOLS[type(node.op)])
self.visit(node.right)
self.write(")")
def visit_BoolOp(self, node):
self.write("(")
for idx, value in enumerate(node.values):
if idx:
self.write(" %s " % BOOLOP_SYMBOLS[type(node.op)])
self.visit(value)
self.write(")")
def visit_Compare(self, node):
self.write("(")
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(" %s " % CMPOP_SYMBOLS[type(op)])
self.visit(right)
self.write(")")
def visit_UnaryOp(self, node):
self.write("(")
op = UNARYOP_SYMBOLS[type(node.op)]
self.write(op)
if op == "not":
self.write(" ")
self.visit(node.operand)
self.write(")")
def visit_Subscript(self, node):
self.visit(node.value)
self.write("[")
self.visit(node.slice)
self.write("]")
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(":")
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(":")
if not (isinstance(node.step, Name) and node.step.id == "None"):
self.visit(node.step)
def visit_ExtSlice(self, node):
        for idx, item in enumerate(node.dims):
if idx:
self.write(", ")
self.visit(item)
def visit_Yield(self, node):
self.write("yield ")
self.visit(node.value)
def visit_Lambda(self, node):
self.write("lambda ")
self.signature(node.args)
self.write(": ")
self.visit(node.body)
def visit_Ellipsis(self, node):
self.write("Ellipsis")
def generator_visit(left, right):
def visit(self, node):
self.write(left)
self.visit(node.elt)
for comprehension in node.generators:
self.visit(comprehension)
self.write(right)
return visit
visit_ListComp = generator_visit("[", "]")
visit_GeneratorExp = generator_visit("(", ")")
visit_SetComp = generator_visit("{", "}")
del generator_visit
def visit_DictComp(self, node):
self.write("{")
self.visit(node.key)
self.write(": ")
self.visit(node.value)
for comprehension in node.generators:
self.visit(comprehension)
self.write("}")
def visit_IfExp(self, node):
self.visit(node.body)
self.write(" if ")
self.visit(node.test)
self.write(" else ")
self.visit(node.orelse)
def visit_Starred(self, node):
self.write("*")
self.visit(node.value)
def visit_Repr(self, node):
# XXX: python 2.6 only
self.write("`")
self.visit(node.value)
self.write("`")
# Helper Nodes
def visit_alias(self, node):
self.write(node.name)
if node.asname is not None:
self.write(" as " + node.asname)
def visit_comprehension(self, node):
self.write(" for ")
self.visit(node.target)
self.write(" in ")
self.visit(node.iter)
if node.ifs:
for if_ in node.ifs:
self.write(" if ")
self.visit(if_)
def visit_excepthandler(self, node):
self.newline()
self.write("except")
if node.type is not None:
self.write(" ")
self.visit(node.type)
if node.name is not None:
self.write(" as ")
self.visit(node.name)
self.write(":")
self.body(node.body)
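# Illustrative sketch (not part of the original module): round-tripping a
# small snippet through SourceGenerator. The four-space ``indent_with`` is
# an arbitrary choice.
def _demo_to_source():
    tree = parse("def add(a, b):\n    return a + b")
    gen = SourceGenerator(indent_with="    ")
    gen.visit(tree)
    return "".join(gen.result)
# _demo_to_source() yields "def add(a, b):\n    return (a + b)" -- binary
# operations are regenerated fully parenthesized.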
|
sqlalchemy/mako
|
mako/_ast_util.py
|
Python
|
mit
| 20,247
|
[
"VisIt"
] |
9d3b3a617c7680ecd395b7903343eb363fbefafe84dede946a2f728a5975e1f2
|
r"""
This module is a VTK Web server application.
The following command line illustrates how to use it::
    $ vtkpython .../vtk_web_graph.py --vertices 1000 --edges 400
Any VTK Web executable script comes with a set of standard arguments that
can be overridden if need be::
    --host localhost
         Interface on which the HTTP server will listen.
    --port 8080
         Port number on which the HTTP server will listen.
    --content /path-to-web-content/
         Directory that you want to serve as static web content.
         By default, this variable is empty, which means that we rely on another
         server to deliver the static content and the current process only
         focuses on the WebSocket connectivity of clients.
    --authKey vtkweb-secret
         Secret key that should be provided by the client to allow it to make
         any WebSocket communication. The client will assume, if none is given,
         that the server expects "vtkweb-secret" as the secret key.
"""
# import to process args
import sys
import os
# import vtk modules.
from vtk import *
import json
import math
# import vtk web modules
from vtk.web import protocols, server
from vtk.web import wamp as vtk_wamp
# import annotations
from autobahn.wamp import procedure as exportRpc
try:
import argparse
except ImportError:
# since Python 2.6 and earlier don't have argparse, we simply provide
# the source for the same as _argparse and we use it instead.
import _argparse as argparse
# =============================================================================
# Create custom File Opener class to handle clients requests
# =============================================================================
class _WebGraph(vtk_wamp.ServerProtocol):
# Application configuration
vertices = 1000
edges = 400
view = None
authKey = "vtkweb-secret"
def initialize(self):
global renderer, renderWindow, renderWindowInteractor, cone, mapper, actor
# Bring used components
self.registerVtkWebProtocol(protocols.vtkWebMouseHandler())
self.registerVtkWebProtocol(protocols.vtkWebViewPort())
self.registerVtkWebProtocol(protocols.vtkWebViewPortImageDelivery())
# Update authentication key to use
self.updateSecret(_WebGraph.authKey)
# Create default pipeline (Only once for all the sessions)
if not _WebGraph.view:
# Generate Random graph
random = vtkRandomGraphSource()
random.SetNumberOfVertices(_WebGraph.vertices)
random.SetNumberOfEdges(_WebGraph.edges)
random.SetStartWithTree(True)
random.Update()
graphData = random.GetOutput()
# Create view
view = vtkGraphLayoutView()
view.AddRepresentationFromInput(graphData)
# Customize Rendering
view.SetVertexLabelArrayName("vertex id")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("vertex id")
view.SetColorVertices(True)
view.SetScalingArrayName("vertex id")
view.ScaledGlyphsOn()
view.HideVertexLabelsOnInteractionOn()
view.SetEdgeColorArrayName("edge id")
view.SetColorEdges(True)
view.SetLayoutStrategyToSpanTree()
# Set trackball interaction style
style = vtkInteractorStyleTrackballCamera()
view.GetRenderWindow().GetInteractor().SetInteractorStyle(style)
# VTK Web application specific
_WebGraph.view = view
view.ResetCamera()
view.Render()
self.Application.GetObjectIdMap().SetActiveObject("VIEW", view.GetRenderWindow())
@exportRpc("graph.layout.update")
def changeLayout(self, layoutName):
if layoutName == 'ForceDirected' :
print 'Layout Strategy = Force Directed'
_WebGraph.view.SetLayoutStrategyToForceDirected()
_WebGraph.view.GetLayoutStrategy().ThreeDimensionalLayoutOn()
        elif layoutName == 'SpanTree' :
print 'Layout Strategy = Span Tree (Depth First Off)'
_WebGraph.view.SetLayoutStrategyToSpanTree()
_WebGraph.view.GetLayoutStrategy().DepthFirstSpanningTreeOff()
elif layoutName == 'SpanTreeDepthFirst' :
print 'Layout Strategy = Span Tree (Depth First On)'
_WebGraph.view.SetLayoutStrategyToSpanTree()
_WebGraph.view.GetLayoutStrategy().DepthFirstSpanningTreeOn()
elif layoutName == 'Circular' :
print 'Layout Strategy = Circular'
_WebGraph.view.SetLayoutStrategyToCircular()
elif layoutName == 'Random' :
print 'Layout Strategy = Random'
_WebGraph.view.SetLayoutStrategyToRandom()
elif layoutName == 'Fast2D' :
print 'Layout Strategy = Fast 2D'
_WebGraph.view.SetLayoutStrategyToFast2D()
elif layoutName == 'Clustering2D' :
print 'Layout Strategy = Clustering 2D'
_WebGraph.view.SetLayoutStrategyToClustering2D()
elif layoutName == 'Community2D' :
print 'Layout Strategy = Community 2D'
_WebGraph.view.SetLayoutStrategyToCommunity2D()
_WebGraph.view.ResetCamera()
_WebGraph.view.Render()
# =============================================================================
# Main: Parse args and start server
# =============================================================================
if __name__ == "__main__":
# Create argument parser
parser = argparse.ArgumentParser(description="VTK/Web Graph web-application")
# Add default arguments
server.add_arguments(parser)
# Add local arguments
parser.add_argument("--vertices", help="Number of vertices used to generate graph", dest="vertices", type=int, default=1000)
parser.add_argument("--edges", help="Number of edges used to generate graph", dest="edges", type=int, default=400)
    # Extract arguments
args = parser.parse_args()
# Configure our current application
_WebGraph.authKey = args.authKey
_WebGraph.vertices = args.vertices
_WebGraph.edges = args.edges
# Start server
server.start_webserver(options=args, protocol=_WebGraph)
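    # Example launch (assumed port/key values), combining the standard options
    # from the module docstring with the two local arguments defined above:
    #   $ vtkpython vtk_web_graph.py --port 9000 --authKey my-secret \
    #       --vertices 500 --edges 250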
|
berendkleinhaneveld/VTK
|
Web/Applications/GraphLayout/server/vtk_web_graph.py
|
Python
|
bsd-3-clause
| 6,396
|
[
"VTK"
] |
856be57af4e027f1adbf94251f023346bbc33896019f2b94b288292eb4405d29
|
"""
This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.
The Intent Schema, Custom Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
from __future__ import print_function
import env
import requests
import json
import pprint
from userinfo import get_user_address
from restaurantslist import get_restaurants_list
from fuzzymatching import match_fuzzy_string
from postmatesapi import get_delivery_info
from yelpcall import business_match_api
from yelpcall import transaction_api
"""
global variable for nearby restaurants
"""
# response = business_match_api("Los+Compadres+Taco+Truck", 'San Francisco', 'CA', 'US')
# print(response)
# response = transaction_api('401 W Brooks St, Norman, OK, US 73019')
# print(response)
# restaurants = get_restaurants_list('buger king', '303 Wadsack Dr, Norman', 0)
# print(restaurants)
# restaurant_namelist = []
# restaurant_infocus = None
# restaurants = get_restaurants_list('restaurant','401 W Brooks St, Norman, OK, US 73019', 49)
# for restaurant in restaurants:
# restaurant_namelist.append(restaurant['name'])
# print(restaurant['name'] + ' ')
# fuzzy_restaurant = "coriander cafe"
# result = match_fuzzy_string(fuzzy_restaurant, restaurant_namelist)
# print('result: ' + str(result) + 'fuzzy string: ' + str(fuzzy_restaurant))
# for restaurant in restaurants:
# if restaurant['name'] == result:
# restaurant_infocus = restaurant
# output_text = restaurant_infocus['name'] + ' . rating .' + str(restaurant_infocus['rating'])
# output_text = restaurant_infocus['phone']
# print (output_text)
# address='303 Wadsack Dr, Norman, OK'
# pickup_address = ''
# address_lines = restaurant_infocus['location']['display_address']
# for line in address_lines:
# pickup_address += (line + ' ')
# response = get_delivery_info(pickup_address, address)
# delivery_time = response['duration']
# output_text = 'estimated delivery time is ' + str(delivery_time) + ' minutes'
# print(output_text)
# card = {
# "type": "Standard",
# "title": "restaurants",
# "text": restaurants[0]['name'] + "\n",
# "image": {
# "smallImageUrl": "http://s3-media2.fl.yelpcdn.com/bphoto/MmgtASP3l_t4tPCL1iAsCg/o.jpg",
# "largeImageUrl": "http://s3-media2.fl.yelpcdn.com/bphoto/MmgtASP3l_t4tPCL1iAsCg/o.jpg"
# }
# }
# obj = build_card_response(card, "haha", " ", True)
# print(obj['outputSpeech']['text'])
# pickup_address = "1241 Alameda Street, Norman, OK 73071"
# dropoff_address = "303 Wadsack Dr, Norman, OK 73072"
# response = get_delivery_info(pickup_address, dropoff_address)
# delivery_time = response['duration'] + ' minutes'
# print(delivery_time)
# --------------- Helpers that build all of the responses ----------------------
#url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=-33.8670522,151.1957362&radius=500&type=restaurant&keyword=cruise&key=AIzaSyBOimD8S4Ifw8o1XBEEFO7YKylK9d0sSJk'
#r = requests.get(url)
def build_ask_permission_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
"card": {
"type": "AskForPermissionsConsent",
"permissions": [
"read::alexa:device:all:address"
]
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_card_response(card, output, reprompt_text, should_end_session):
response = {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
response['card'] = card
return response
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
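# Illustrative sketch (not part of the original skill): assembling a full
# Alexa response envelope from the two helpers above. The title, speech and
# reprompt strings are placeholder examples.
def _demo_response():
    speechlet = build_speechlet_response(
        "Demo", "Hello from Eatquick", "Say that again?", False)
    return build_response({}, speechlet)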
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = "Welcome"
# speech_output = r.json()['results'][0]['name']
speech_output = "Welcome to Eatquick"
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = ""
should_end_session = False
return build_response(session_attributes, build_ask_permission_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for trying the Alexa Skills Kit sample. " \
"Have a nice day! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session, context):
intent = intent_request["intent"]
intent_name = intent_request["intent"]["name"]
if intent_name == 'GetNearbyRestaurants' or intent_name == 'GetMoreRestaurants':
address = get_user_address(context)
output_text = ""
if address:
restaurants = get_restaurants_list('restaurant', address, 20)
if intent_name == 'GetNearbyRestaurants':
restaurants = restaurants[0:11]
elif intent_name == 'GetMoreRestaurants':
restaurants = restaurants[11:]
if restaurants:
formatted_text = ''
for restaurant in restaurants:
output_text += restaurant['name'] + ' .'
formatted_text += restaurant['name'] + "\n"
                if intent_name == 'GetNearbyRestaurants':
                    output_text += "To ask for more information about a restaurant, say . tell me more about . followed by the name of the restaurant. " \
                        "Or you can ask for its phone number or address or if it is open. To hear more restaurant options, say . more restaurants. "
                elif intent_name == 'GetMoreRestaurants':
                    output_text += "To ask for more information about a restaurant, say . tell me more about . followed by the name of the restaurant. " \
                        "Or you can ask for its phone number or address or if it is open. "
card = {
"type": "Standard",
"title": "restaurants",
"text": formatted_text,
"image": {
"smallImageUrl": "https://s3-media2.fl.yelpcdn.com/bphoto/MmgtASP3l_t4tPCL1iAsCg/o.jpg",
"largeImageUrl": "https://s3-media2.fl.yelpcdn.com/bphoto/MmgtASP3l_t4tPCL1iAsCg/o.jpg"
}
}
session_attributes = {}
reprompt_text = ""
should_end_session = False
speech_output = output_text
return build_response(session_attributes, build_card_response(card, speech_output, reprompt_text, should_end_session))
else:
# fallback response (restaurants not found)
session_attributes = {}
card_title = ''
reprompt_text = ''
should_end_session = True
                speech_output = 'Sorry. Eatquick cannot find any restaurants within 5 miles of where you are. '
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
else:
# user permission not granted, send out prompt message and a new permission card
session_attributes = {}
card_title = ''
reprompt_text = ''
should_end_session = True
            speech_output = 'Eatquick cannot function without address information. '\
                'To permit access to address information, enable Eatquick again, and consent to provide address information in the Alexa app.'
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
elif intent_name == 'GetRestaurantInfo' or intent_name == 'GetRestaurantCategory' or \
intent_name == 'GetRestaurantNumber' or \
intent_name == 'GetRestaurantStatus' or \
intent_name == 'GetRestaurantAddress' or \
intent_name == 'GetDeliveryTime':
# string from Alexa literal string
fuzzy_restaurant = intent['slots']['restaurant']['value']
# stores nearby restaurants
restaurant_namelist = []
# stores restaurant user asks for
restaurant_infocus = None
output_text = ""
"""
get restaurants
"""
address = get_user_address(context)
if address:
restaurants = get_restaurants_list('restaurant', address, 49)
if restaurants:
for item in restaurants:
restaurant_namelist.append(item['name'])
# fuzzy matching with restaurant name list
restaurant = match_fuzzy_string(fuzzy_restaurant, restaurant_namelist)
if restaurant:
for item in restaurants:
if item['name'] == restaurant:
restaurant_infocus = item
if intent_name == 'GetRestaurantInfo':
categories = restaurant_infocus['categories']
category_list = ''
for category in categories:
category_list += category['title'] + '. .'
                        output_text = (restaurant_infocus['name'] + ' . '
                                       + category_list
                                       + ' . rating . ' + str(restaurant_infocus['rating'])
                                       + ('. and is open now .'
                                          if not restaurant_infocus['is_closed']
                                          else '. and is closed now .'))
elif intent_name == 'GetRestaurantCategory':
categories = restaurant_infocus['categories']
for category in categories:
output_text += category['title'] + '. .'
elif intent_name == 'GetRestaurantNumber':
output_text = 'The phone number for ' + restaurant_infocus['name'] + ' is '
phone_num = restaurant_infocus['phone']
phone_digits = list(phone_num)
phone_digits.pop(0) # get rid of + before country code
for digit in phone_digits:
output_text += (digit + ' ')
elif intent_name == 'GetRestaurantStatus':
output_text = restaurant_infocus['name']
output_text += '. is now open .' if (restaurant_infocus['is_closed'] == False) else '. is closed .'
elif intent_name == 'GetRestaurantAddress':
output_text = restaurant_infocus['name'] + " . is located at . "
address_lines = restaurant_infocus['location']['display_address']
for line in address_lines:
output_text += (line + " . ")
elif intent_name == 'GetDeliveryTime':
# code for postmates
pickup_address = ''
address_lines = restaurant_infocus['location']['display_address']
for line in address_lines:
pickup_address += (line + ' ')
response = get_delivery_info(pickup_address, address)
delivery_time = response['duration']
                            output_text = 'estimated delivery time is ' + str(delivery_time) + ' minutes according to Postmates'
session_attributes = {}
card_title = "restaurant info: " #+ restaurant_infocus
reprompt_text = ""
should_end_session = False
speech_output = output_text
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
else:
# no matching restaurant
session_attributes = {}
card_title = ''
reprompt_text = ''
should_end_session = False
speech_output = 'Sorry. The restaurant you asked for is not in this area. '\
'Please choose a restaurant from the list of nearby restaurants we provided and ask again. ' \
'To listen to the list of restaurants nearby. Say. . what are the restaurants nearby. '
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
else:
# fallback response (list of restaurants not found)
session_attributes = {}
card_title = ''
reprompt_text = ''
should_end_session = True
speech_output = 'Sorry. The restaurant you asked for is not in this area. '\
'Please choose a restaurant from the list of nearby restaurants we provided and ask again. ' \
'To listen to the list of restaurants nearby. Say. . what are the restaurants nearby. '
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
else:
# user permission not granted, send out prompt message and a new permission card
session_attributes = {}
card_title = ''
reprompt_text = ''
should_end_session = True
            speech_output = 'Eatquick cannot function without address information. '\
                'To permit access to address information, enable Eatquick again, and consent to provide address information in the Alexa app.'
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
elif intent_name == 'EndSession':
session_attributes = {}
card_title = 'Good Bye!'
reprompt_text = ''
should_end_session = True
speech_output = 'Good bye!'
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
handle_session_end_request()
# --------------- Main handler ------------------
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
"""
code
"""
# userAddress = get_user_address(event['context']['system'])
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'],event['context'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
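# Illustrative sketch (not part of the original skill): the minimal event
# shape lambda_handler expects for a launch. All identifiers here are fake
# placeholders, not real skill or session ids.
_DEMO_LAUNCH_EVENT = {
    'session': {'new': True, 'sessionId': 'sid-demo',
                'application': {'applicationId': 'amzn1.ask.skill.demo'}},
    'request': {'type': 'LaunchRequest', 'requestId': 'rid-demo'},
    'context': {},
}
# lambda_handler(_DEMO_LAUNCH_EVENT, None) routes to on_launch() and returns
# the welcome response built above.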
|
chanyoonzhu/eatquik
|
eatquik.py
|
Python
|
mit
| 18,563
|
[
"VisIt"
] |
74fcdcc832be0af11230ad74e3adb932bfa6f14846925ac318caf613d80e1fac
|
from __future__ import division, absolute_import, print_function
import os
import sys
import types
import re
import warnings
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
from numpy.core import ndarray, ufunc, asarray
# getargspec and formatargspec were removed in Python 3.6
from numpy.compat import getargspec, formatargspec
__all__ = [
'issubclass_', 'issubsctype', 'issubdtype', 'deprecate',
'deprecate_with_doc', 'get_include', 'info', 'source', 'who',
'lookfor', 'byte_bounds', 'safe_eval'
]
def get_include():
"""
Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy should use this
function to locate the appropriate include directory.
Notes
-----
When using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
...
"""
import numpy
if numpy.show_config is None:
# running from numpy source directory
d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
else:
# using installed numpy core headers
import numpy.core as core
d = os.path.join(os.path.dirname(core.__file__), 'include')
return d
def _set_function_name(func, name):
func.__name__ = name
return func
class _Deprecate(object):
"""
Decorator class to deprecate old functions.
Refer to `deprecate` for details.
See Also
--------
deprecate
"""
def __init__(self, old_name=None, new_name=None, message=None):
self.old_name = old_name
self.new_name = new_name
self.message = message
def __call__(self, func, *args, **kwargs):
"""
        Decorator call. Refer to ``deprecate``.
"""
old_name = self.old_name
new_name = self.new_name
message = self.message
import warnings
if old_name is None:
            old_name = func.__name__
if new_name is None:
depdoc = "`%s` is deprecated!" % old_name
else:
depdoc = "`%s` is deprecated, use `%s` instead!" % \
(old_name, new_name)
if message is not None:
depdoc += "\n" + message
def newfunc(*args,**kwds):
"""`arrayrange` is deprecated, use `arange` instead!"""
warnings.warn(depdoc, DeprecationWarning)
return func(*args, **kwds)
newfunc = _set_function_name(newfunc, old_name)
doc = func.__doc__
if doc is None:
doc = depdoc
else:
doc = '\n\n'.join([depdoc, doc])
newfunc.__doc__ = doc
try:
d = func.__dict__
except AttributeError:
pass
else:
newfunc.__dict__.update(d)
return newfunc
def deprecate(*args, **kwargs):
"""
Issues a DeprecationWarning, adds warning to `old_name`'s
docstring, rebinds ``old_name.__name__`` and returns the new
function object.
This function may also be used as a decorator.
Parameters
----------
func : function
The function to be deprecated.
old_name : str, optional
The name of the function to be deprecated. Default is None, in
which case the name of `func` is used.
new_name : str, optional
The new name for the function. Default is None, in which case the
deprecation message is that `old_name` is deprecated. If given, the
deprecation message is that `old_name` is deprecated and `new_name`
should be used instead.
message : str, optional
Additional explanation of the deprecation. Displayed in the
docstring after the warning.
Returns
-------
old_func : function
The deprecated function.
Examples
--------
Note that ``olduint`` returns a value after printing Deprecation
Warning:
>>> olduint = np.deprecate(np.uint)
>>> olduint(6)
/usr/lib/python2.5/site-packages/numpy/lib/utils.py:114:
DeprecationWarning: uint32 is deprecated
warnings.warn(str1, DeprecationWarning)
6
"""
# Deprecate may be run as a function or as a decorator
# If run as a function, we initialise the decorator class
# and execute its __call__ method.
if args:
fn = args[0]
args = args[1:]
# backward compatibility -- can be removed
# after next release
if 'newname' in kwargs:
kwargs['new_name'] = kwargs.pop('newname')
if 'oldname' in kwargs:
kwargs['old_name'] = kwargs.pop('oldname')
return _Deprecate(*args, **kwargs)(fn)
else:
return _Deprecate(*args, **kwargs)
deprecate_with_doc = lambda msg: _Deprecate(message=msg)
#--------------------------------------------
# Determine if two arrays can share memory
#--------------------------------------------
def byte_bounds(a):
"""
Returns pointers to the end-points of an array.
Parameters
----------
a : ndarray
Input array. It must conform to the Python-side of the array
interface.
Returns
-------
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second
integer is just past the last byte of the array. If `a` is not
contiguous it will not use every byte between the (`low`, `high`)
values.
Examples
--------
>>> I = np.eye(2, dtype='f'); I.dtype
dtype('float32')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
>>> I = np.eye(2, dtype='G'); I.dtype
dtype('complex192')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
"""
ai = a.__array_interface__
a_data = ai['data'][0]
astrides = ai['strides']
ashape = ai['shape']
bytes_a = asarray(a).dtype.itemsize
a_low = a_high = a_data
if astrides is None:
# contiguous case
a_high += a.size * bytes_a
else:
for shape, stride in zip(ashape, astrides):
if stride < 0:
a_low += (shape-1)*stride
else:
a_high += (shape-1)*stride
a_high += bytes_a
return a_low, a_high
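# Illustrative sketch (not part of the original module): using byte_bounds
# to answer the "can these two arrays share memory" question from the
# section comment above. Disjoint byte ranges can never overlap;
# intersecting ranges might (a conservative test).
def _demo_may_overlap(a, b):
    a_low, a_high = byte_bounds(a)
    b_low, b_high = byte_bounds(b)
    return not (a_high <= b_low or b_high <= a_low)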
#-----------------------------------------------------------------------------
# Function for output and information on the variables used.
#-----------------------------------------------------------------------------
def who(vardict=None):
"""
Print the Numpy arrays in the given dictionary.
If there is no dictionary passed in or `vardict` is None then returns
Numpy arrays in the globals() dictionary (all Numpy arrays in the
namespace).
Parameters
----------
vardict : dict, optional
A dictionary possibly containing ndarrays. Default is globals().
Returns
-------
out : None
Returns 'None'.
Notes
-----
Prints out the name, shape, bytes and type of all of the ndarrays
present in `vardict`.
Examples
--------
>>> a = np.arange(10)
>>> b = np.ones(20)
>>> np.who()
Name Shape Bytes Type
===========================================================
a 10 40 int32
b 20 160 float64
Upper bound on total bytes = 200
>>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
... 'idx':5}
>>> np.who(d)
Name Shape Bytes Type
===========================================================
y 3 24 float64
x 2 16 float64
Upper bound on total bytes = 40
"""
if vardict is None:
frame = sys._getframe().f_back
vardict = frame.f_globals
sta = []
cache = {}
for name in vardict.keys():
if isinstance(vardict[name], ndarray):
var = vardict[name]
idv = id(var)
if idv in cache.keys():
namestr = name + " (%s)" % cache[idv]
original = 0
else:
cache[idv] = name
namestr = name
original = 1
shapestr = " x ".join(map(str, var.shape))
bytestr = str(var.nbytes)
sta.append([namestr, shapestr, bytestr, var.dtype.name,
original])
maxname = 0
maxshape = 0
maxbyte = 0
totalbytes = 0
for k in range(len(sta)):
val = sta[k]
if maxname < len(val[0]):
maxname = len(val[0])
if maxshape < len(val[1]):
maxshape = len(val[1])
if maxbyte < len(val[2]):
maxbyte = len(val[2])
if val[4]:
totalbytes += int(val[2])
if len(sta) > 0:
sp1 = max(10, maxname)
sp2 = max(10, maxshape)
sp3 = max(10, maxbyte)
prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
print(prval + "\n" + "="*(len(prval)+5) + "\n")
for k in range(len(sta)):
val = sta[k]
print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
val[1], ' '*(sp2-len(val[1])+5),
val[2], ' '*(sp3-len(val[2])+5),
val[3]))
print("\nUpper bound on total bytes = %d" % totalbytes)
return
#-----------------------------------------------------------------------------
# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of width
# characters. End lines on a comma and begin argument list indented with
# the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
newstr = name
sepstr = ", "
arglist = arguments.split(sepstr)
for argument in arglist:
if k == firstwidth:
addstr = ""
else:
addstr = sepstr
k = k + len(argument) + len(addstr)
if k > width:
k = firstwidth + 1 + len(argument)
newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
else:
newstr = newstr + addstr + argument
return newstr
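# Illustrative sketch (not part of the original module): how _split_line
# wraps a long signature. The name, arguments and width are arbitrary
# examples; continuation lines are indented to just past the opening paren.
def _demo_split_line():
    return _split_line("frobnicate", "(alpha, beta, gamma)", 20)
# -> "frobnicate(alpha,\n            beta,\n            gamma)"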
_namedict = None
_dictlist = None
# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
module = __import__(module, globals(), locals(), [])
thedict = {module.__name__:module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
while True:
if len(totraverse) == 0:
break
thisdict = totraverse.pop(0)
for x in thisdict.keys():
if isinstance(thisdict[x], types.ModuleType):
modname = thisdict[x].__name__
if modname not in dictlist:
moddict = thisdict[x].__dict__
dictlist.append(modname)
totraverse.append(moddict)
thedict[modname] = moddict
return thedict, dictlist
def _info(obj, output=sys.stdout):
"""Provide information about ndarray obj.
Parameters
----------
obj: ndarray
Must be ndarray, not checked.
output:
Where printed output goes.
Notes
-----
Copied over from the numarray module prior to its removal.
Adapted somewhat as only numpy is an option now.
Called by info.
"""
extra = ""
tic = ""
bp = lambda x: x
cls = getattr(obj, '__class__', type(obj))
nm = getattr(cls, '__name__', cls)
strides = obj.strides
endian = obj.dtype.byteorder
print("class: ", nm, file=output)
print("shape: ", obj.shape, file=output)
print("strides: ", strides, file=output)
print("itemsize: ", obj.itemsize, file=output)
print("aligned: ", bp(obj.flags.aligned), file=output)
print("contiguous: ", bp(obj.flags.contiguous), file=output)
print("fortran: ", obj.flags.fortran, file=output)
print(
"data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra),
file=output
)
print("byteorder: ", end=' ', file=output)
if endian in ['|', '=']:
print("%s%s%s" % (tic, sys.byteorder, tic), file=output)
byteswap = False
elif endian == '>':
print("%sbig%s" % (tic, tic), file=output)
byteswap = sys.byteorder != "big"
else:
print("%slittle%s" % (tic, tic), file=output)
byteswap = sys.byteorder != "little"
print("byteswap: ", bp(byteswap), file=output)
print("type: %s" % obj.dtype, file=output)
def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
"""
Get help information for a function, class, or module.
Parameters
----------
object : object or str, optional
Input object or name to get information about. If `object` is a
numpy object, its docstring is given. If it is a string, available
modules are searched for matching objects. If None, information
about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
File like object that the output is written to, default is
``stdout``. The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
See Also
--------
source, lookfor
Notes
-----
When used interactively with an object, ``np.info(obj)`` is equivalent
to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
prompt.
Examples
--------
>>> np.info(np.polyval) # doctest: +SKIP
polyval(p, x)
Evaluate the polynomial p at x.
...
When using a string for `object` it is possible to get multiple results.
>>> np.info('fft') # doctest: +SKIP
*** Found in numpy ***
Core FFT routines
...
*** Found in numpy.fft ***
fft(a, n=None, axis=-1)
...
*** Repeat reference found in numpy.fft.fftpack ***
*** Total of 3 references found. ***
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
import pydoc
import inspect
if (hasattr(object, '_ppimport_importer') or
hasattr(object, '_ppimport_module')):
object = object._ppimport_module
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
if object is None:
info(info)
elif isinstance(object, ndarray):
_info(object, output=output)
elif isinstance(object, str):
if _namedict is None:
_namedict, _dictlist = _makenamedict(toplevel)
numfound = 0
objlist = []
for namestr in _dictlist:
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
print("\n "
"*** Repeat reference found in %s *** " % namestr,
file=output
)
else:
objlist.append(id(obj))
print(" *** Found in %s ***" % namestr, file=output)
info(obj)
print("-"*maxwidth, file=output)
numfound += 1
except KeyError:
pass
if numfound == 0:
print("Help for %s not found." % object, file=output)
else:
print("\n "
"*** Total of %d references found. ***" % numfound,
file=output
)
elif inspect.isfunction(object):
name = object.__name__
arguments = formatargspec(*getargspec(object))
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
print(inspect.getdoc(object), file=output)
elif inspect.isclass(object):
name = object.__name__
arguments = "()"
try:
if hasattr(object, '__init__'):
arguments = formatargspec(
*getargspec(object.__init__.__func__)
)
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
except:
pass
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
doc1 = inspect.getdoc(object)
if doc1 is None:
if hasattr(object, '__init__'):
print(inspect.getdoc(object.__init__), file=output)
else:
print(inspect.getdoc(object), file=output)
methods = pydoc.allmethods(object)
if methods != []:
print("\n\nMethods:\n", file=output)
for meth in methods:
if meth[0] == '_':
continue
thisobj = getattr(object, meth, None)
if thisobj is not None:
methstr, other = pydoc.splitdoc(
inspect.getdoc(thisobj) or "None"
)
print(" %s -- %s" % (meth, methstr), file=output)
elif (sys.version_info[0] < 3
and isinstance(object, types.InstanceType)):
# check for __call__ method
# types.InstanceType is the type of the instances of oldstyle classes
print("Instance of class: ", object.__class__.__name__, file=output)
print(file=output)
if hasattr(object, '__call__'):
arguments = formatargspec(
*getargspec(object.__call__.__func__)
)
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if hasattr(object, 'name'):
name = "%s" % object.name
else:
name = "<name>"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
doc = inspect.getdoc(object.__call__)
if doc is not None:
print(inspect.getdoc(object.__call__), file=output)
print(inspect.getdoc(object), file=output)
else:
print(inspect.getdoc(object), file=output)
elif inspect.ismethod(object):
name = object.__name__
arguments = formatargspec(
*getargspec(object.__func__)
)
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
else:
arguments = "()"
if len(name+arguments) > maxwidth:
argstr = _split_line(name, arguments, maxwidth)
else:
argstr = name + arguments
print(" " + argstr + "\n", file=output)
print(inspect.getdoc(object), file=output)
elif hasattr(object, '__doc__'):
print(inspect.getdoc(object), file=output)
def source(object, output=sys.stdout):
"""
Print or write to a file the source code for a Numpy object.
The source code is only returned for objects written in Python. Many
functions and classes are defined in C and will therefore not return
useful information.
Parameters
----------
object : numpy object
Input object. This can be any object (function, class, module,
...).
output : file object, optional
If `output` not supplied then source code is printed to screen
(sys.stdout). File object must be created with either write 'w' or
append 'a' modes.
See Also
--------
lookfor, info
Examples
--------
>>> np.source(np.interp) #doctest: +SKIP
In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
def interp(x, xp, fp, left=None, right=None):
\"\"\".... (full docstring printed)\"\"\"
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
The source code is only returned for objects written in Python.
>>> np.source(np.array) #doctest: +SKIP
Not available for this object.
"""
# Local import to speed up numpy's import time.
import inspect
try:
print("In file: %s\n" % inspect.getsourcefile(object), file=output)
print(inspect.getsource(object), file=output)
except:
print("Not available for this object.", file=output)
# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
# where kind: "func", "class", "module", "object"
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}
# regexp whose match indicates that the string may contain a function
# signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
def lookfor(what, module=None, import_modules=True, regenerate=False,
output=None):
"""
Do a keyword search on docstrings.
    A list of objects that match the search is displayed,
sorted by relevance. All given keywords need to be found in the
docstring for it to be returned as a result, but the order does
not matter.
Parameters
----------
what : str
String containing words to look for.
module : str or list, optional
Name of module(s) whose docstrings to go through.
import_modules : bool, optional
Whether to import sub-modules in packages. Default is True.
regenerate : bool, optional
Whether to re-generate the docstring cache. Default is False.
output : file-like, optional
File-like object to write the output to. If omitted, use a pager.
See Also
--------
source, info
Notes
-----
Relevance is determined only roughly, by checking if the keywords occur
in the function name, at the start of a docstring, etc.
Examples
--------
>>> np.lookfor('binary representation')
Search results for 'binary representation'
------------------------------------------
numpy.binary_repr
Return the binary representation of the input number as a string.
numpy.core.setup_common.long_double_representation
Given a binary dump as given by GNU od -b, look for long double
numpy.base_repr
Return a string representation of a number in the given base system.
...
"""
import pydoc
# Cache
cache = _lookfor_generate_cache(module, import_modules, regenerate)
# Search
# XXX: maybe using a real stemming search engine would be better?
found = []
whats = str(what).lower().split()
if not whats:
return
for name, (docstring, kind, index) in cache.items():
if kind in ('module', 'object'):
# don't show modules or objects
continue
ok = True
doc = docstring.lower()
for w in whats:
if w not in doc:
ok = False
break
if ok:
found.append(name)
# Relevance sort
# XXX: this is full Harrison-Stetson heuristics now,
# XXX: it probably could be improved
kind_relevance = {'func': 1000, 'class': 1000,
'module': -1000, 'object': -1000}
def relevance(name, docstr, kind, index):
r = 0
# do the keywords occur within the start of the docstring?
first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
r += sum([200 for w in whats if w in first_doc])
# do the keywords occur in the function name?
r += sum([30 for w in whats if w in name])
# is the full name long?
r += -len(name) * 5
# is the object of bad type?
r += kind_relevance.get(kind, -1000)
# is the object deep in namespace hierarchy?
r += -name.count('.') * 10
r += max(-index / 100, -100)
return r
def relevance_value(a):
return relevance(a, *cache[a])
found.sort(key=relevance_value)
# Pretty-print
s = "Search results for '%s'" % (' '.join(whats))
help_text = [s, "-"*len(s)]
for name in found[::-1]:
doc, kind, ix = cache[name]
doclines = [line.strip() for line in doc.strip().split("\n")
if line.strip()]
# find a suitable short description
try:
first_doc = doclines[0].strip()
if _function_signature_re.search(first_doc):
first_doc = doclines[1].strip()
except IndexError:
first_doc = ""
help_text.append("%s\n %s" % (name, first_doc))
if not found:
help_text.append("Nothing found.")
# Output
if output is not None:
output.write("\n".join(help_text))
elif len(help_text) > 10:
pager = pydoc.getpager()
pager("\n".join(help_text))
else:
print("\n".join(help_text))
def _lookfor_generate_cache(module, import_modules, regenerate):
"""
Generate docstring cache for given module.
Parameters
----------
module : str, None, module
Module for which to generate docstring cache
import_modules : bool
Whether to import sub-modules in packages.
regenerate : bool
Re-generate the docstring cache
Returns
-------
cache : dict {obj_full_name: (docstring, kind, index), ...}
Docstring cache for the module, either cached one (regenerate=False)
or newly generated.
"""
global _lookfor_caches
# Local import to speed up numpy's import time.
import inspect
if sys.version_info[0] >= 3:
# In Python3 stderr, stdout are text files.
from io import StringIO
else:
from StringIO import StringIO
if module is None:
module = "numpy"
if isinstance(module, str):
try:
__import__(module)
except ImportError:
return {}
module = sys.modules[module]
elif isinstance(module, list) or isinstance(module, tuple):
cache = {}
for mod in module:
cache.update(_lookfor_generate_cache(mod, import_modules,
regenerate))
return cache
if id(module) in _lookfor_caches and not regenerate:
return _lookfor_caches[id(module)]
# walk items and collect docstrings
cache = {}
_lookfor_caches[id(module)] = cache
seen = {}
index = 0
stack = [(module.__name__, module)]
while stack:
name, item = stack.pop(0)
if id(item) in seen:
continue
seen[id(item)] = True
index += 1
kind = "object"
if inspect.ismodule(item):
kind = "module"
try:
_all = item.__all__
except AttributeError:
_all = None
# import sub-packages
if import_modules and hasattr(item, '__path__'):
for pth in item.__path__:
for mod_path in os.listdir(pth):
this_py = os.path.join(pth, mod_path)
init_py = os.path.join(pth, mod_path, '__init__.py')
if (os.path.isfile(this_py) and
mod_path.endswith('.py')):
to_import = mod_path[:-3]
elif os.path.isfile(init_py):
to_import = mod_path
else:
continue
if to_import == '__init__':
continue
try:
# Catch SystemExit, too
base_exc = BaseException
except NameError:
# Python 2.4 doesn't have BaseException
base_exc = Exception
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
__import__("%s.%s" % (name, to_import))
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
except base_exc:
continue
for n, v in _getmembers(item):
try:
item_name = getattr(v, '__name__', "%s.%s" % (name, n))
mod_name = getattr(v, '__module__', None)
except NameError:
# ref. SWIG's global cvars
# NameError: Unknown C global variable
item_name = "%s.%s" % (name, n)
mod_name = None
if '.' not in item_name and mod_name:
item_name = "%s.%s" % (mod_name, item_name)
if not item_name.startswith(name + '.'):
# don't crawl "foreign" objects
if isinstance(v, ufunc):
# ... unless they are ufuncs
pass
else:
continue
elif not (inspect.ismodule(v) or _all is None or n in _all):
continue
stack.append(("%s.%s" % (name, n), v))
elif inspect.isclass(item):
kind = "class"
for n, v in _getmembers(item):
stack.append(("%s.%s" % (name, n), v))
elif hasattr(item, "__call__"):
kind = "func"
try:
doc = inspect.getdoc(item)
except NameError:
# ref SWIG's NameError: Unknown C global variable
doc = None
if doc is not None:
cache[name] = (doc, kind, index)
return cache
def _getmembers(item):
import inspect
try:
members = inspect.getmembers(item)
except Exception:
members = [(x, getattr(item, x)) for x in dir(item)
if hasattr(item, x)]
return members
#-----------------------------------------------------------------------------
# The following SafeEval class and company are adapted from Michael Spencer's
# ASPN Python Cookbook recipe:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469
# Accordingly it is mostly Copyright 2006 by Michael Spencer.
# The recipe, like most of the other ASPN Python Cookbook recipes was made
# available under the Python license.
# http://www.python.org/license
# It has been modified to:
# * handle unary -/+
# * support True/False/None
# * raise SyntaxError instead of a custom exception.
class SafeEval(object):
"""
Object to evaluate constant string expressions.
This includes strings with lists, dicts and tuples using the abstract
syntax tree created by ``compiler.parse``.
.. deprecated:: 1.10.0
See Also
--------
safe_eval
"""
def __init__(self):
# 2014-10-15, 1.10
warnings.warn("SafeEval is deprecated in 1.10 and will be removed.",
DeprecationWarning)
def visit(self, node):
cls = node.__class__
meth = getattr(self, 'visit' + cls.__name__, self.default)
return meth(node)
def default(self, node):
raise SyntaxError("Unsupported source construct: %s"
% node.__class__)
def visitExpression(self, node):
return self.visit(node.body)
def visitNum(self, node):
return node.n
def visitStr(self, node):
return node.s
def visitBytes(self, node):
return node.s
def visitDict(self, node,**kw):
return dict([(self.visit(k), self.visit(v))
for k, v in zip(node.keys, node.values)])
def visitTuple(self, node):
return tuple([self.visit(i) for i in node.elts])
def visitList(self, node):
return [self.visit(i) for i in node.elts]
def visitUnaryOp(self, node):
import ast
if isinstance(node.op, ast.UAdd):
return +self.visit(node.operand)
elif isinstance(node.op, ast.USub):
return -self.visit(node.operand)
else:
raise SyntaxError("Unknown unary op: %r" % node.op)
def visitName(self, node):
if node.id == 'False':
return False
elif node.id == 'True':
return True
elif node.id == 'None':
return None
else:
raise SyntaxError("Unknown name: %s" % node.id)
def visitNameConstant(self, node):
return node.value
def safe_eval(source):
"""
Protected string evaluation.
Evaluate a string containing a Python literal expression without
allowing the execution of arbitrary non-literal code.
Parameters
----------
source : str
The string to evaluate.
Returns
-------
obj : object
The result of evaluating `source`.
Raises
------
SyntaxError
If the code has invalid Python syntax, or if it contains
non-literal code.
Examples
--------
>>> np.safe_eval('1')
1
>>> np.safe_eval('[1, 2, 3]')
[1, 2, 3]
>>> np.safe_eval('{"foo": ("bar", 10.0)}')
{'foo': ('bar', 10.0)}
>>> np.safe_eval('import os')
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
Traceback (most recent call last):
...
SyntaxError: Unsupported source construct: compiler.ast.CallFunc
"""
# Local import to speed up numpy's import time.
import ast
return ast.literal_eval(source)
#-----------------------------------------------------------------------------
|
LumPenPacK/NetworkExtractionFromImages
|
win_build/nefi2_win_amd64_msvc_2015/site-packages/numpy/lib/utils.py
|
Python
|
bsd-2-clause
| 34,996
|
[
"VisIt"
] |
4620a3ea5b47e1966930293b530dea38fdcc8e305061677f0c52deb7fb6f940d
|
import hashlib
import os
import shutil
import typing as T
import cdsapi # type: ignore
SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data")
EXTENSIONS = {"grib": ".grib", "netcdf": ".nc"}
def ensure_data(dataset, request, folder=SAMPLE_DATA_FOLDER, name="{uuid}.grib"):
# type: (str, T.Dict[str, T.Any], str, str) -> str
request_text = str(sorted(request.items())).encode("utf-8")
uuid = hashlib.sha3_224(request_text).hexdigest()[:10]
format = request.get("format", "grib")
ext = EXTENSIONS.get(format, ".bin")
name = name.format(**locals())
path = os.path.join(SAMPLE_DATA_FOLDER, name)
if not os.path.exists(path):
c = cdsapi.Client()
try:
c.retrieve(dataset, request, target=path + ".tmp")
shutil.move(path + ".tmp", path)
except:
os.unlink(path + ".tmp")
raise
return path
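# Hedged usage sketch: the dataset name and request keys below are illustrative
# placeholders (valid values depend on the Climate Data Store catalogue), and
# running it requires configured cdsapi credentials.
if __name__ == "__main__":
    sample_request = {
        "product_type": "reanalysis",
        "variable": "2m_temperature",
        "year": "2020",
        "month": "01",
        "day": "01",
        "time": "12:00",
        "format": "grib",
    }
    # First call downloads into SAMPLE_DATA_FOLDER; later calls reuse the file.
    print(ensure_data("reanalysis-era5-single-levels", sample_request))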
|
ecmwf/cfgrib
|
tests/cdscommon.py
|
Python
|
apache-2.0
| 914
|
[
"NetCDF"
] |
2699920a8902acd703c5ef80bfbea68cfebfb77e3d96e0a02c12f127a8189efc
|
# Orca
#
# Copyright 2014 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for epiphany."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2014 Igalia, S.L."
__license__ = "LGPL"
import orca.orca as orca
import orca.scripts.toolkits.gtk as gtk
import orca.scripts.toolkits.WebKitGtk as WebKitGtk
class Script(WebKitGtk.Script):
def __init__(self, app):
WebKitGtk.Script.__init__(self, app)
def onWindowActivated(self, event):
"""Callback for window:activate accessibility events."""
gtk.Script.onWindowActivated(self, event)
obj, offset = self.utilities.getCaretContext()
if obj:
orca.setLocusOfFocus(None, obj)
def onWindowDeactivated(self, event):
"""Callback for window:deactivate accessibility events."""
gtk.Script.onWindowDeactivated(self, event)
|
pvagner/orca
|
src/orca/scripts/apps/epiphany/script.py
|
Python
|
lgpl-2.1
| 1,664
|
[
"ORCA"
] |
e41ee22eb4db0c1210994c91cdb7ebc815ace448f4ac47581a3c0836b68bbda7
|
import json
import logging
import sys
import networkx as nx
from networkx.algorithms import weakly_connected_component_subgraphs
from collections import defaultdict
from itertools import chain, ifilter
from functools import partial
from synapseclustering import tree_max_density
from numpy import subtract
from numpy.linalg import norm
from math import sqrt
from django.db import connection
from django.http import HttpResponse
from catmaid.models import Relation, UserRole
from catmaid.control.authentication import requires_user_role
from catmaid.control.common import get_relation_to_id_map
from catmaid.control.review import get_treenodes_to_reviews
from catmaid.control.tree_util import simplify, find_root, reroot, partition, \
spanning_tree, cable_length
def split_by_confidence_and_add_edges(confidence_threshold, digraphs, rows):
    """ digraphs is a dictionary of skeleton IDs as keys and DiGraph instances as values,
    where the DiGraph does not have any edges yet.
    WARNING: side effect on contents of digraphs: will add the edges
    """
arbors = {}
# Define edges, which may result in multiple subgraphs for each skeleton
# when splitting at low-confidence edges:
if 0 == confidence_threshold:
# Do not split skeletons
for row in rows:
if row[1]:
digraphs[row[3]].add_edge(row[1], row[0])
for skid, digraph in digraphs.iteritems():
arbors[skid] = [digraph]
else:
# The DiGraph representing the skeleton may be disconnected at a low-confidence edge
to_split = set()
for row in rows:
if row[2] < confidence_threshold:
to_split.add(row[3])
elif row[1]:
digraphs[row[3]].add_edge(row[1], row[0])
for skid, digraph in digraphs.iteritems():
if skid in to_split:
arbors[skid] = weakly_connected_component_subgraphs(digraph)
else:
arbors[skid] = [digraph]
return arbors
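# Hedged illustration of the splitting idea above (the helper name is ours and
# it is not used by the view code): edges below the confidence threshold are
# dropped and the remaining graph is broken into weakly connected components,
# yielding one sub-arbor per component. Only networkx is required.
def _demo_split_by_confidence(edges, confidence_threshold):
    """ edges: iterable of (parent_id, child_id, confidence) tuples. """
    g = nx.DiGraph()
    for parent_id, child_id, confidence in edges:
        g.add_node(parent_id)
        g.add_node(child_id)
        if confidence >= confidence_threshold:
            g.add_edge(parent_id, child_id)
    return list(weakly_connected_component_subgraphs(g))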
def split_by_synapse_domain(bandwidth, locations, arbors, treenode_connector, minis):
""" locations: dictionary of treenode ID vs tuple with x,y,z
arbors: dictionary of skeleton ID vs list of DiGraph (that were, or not, split by confidence)
    treenode_connector: dictionary of treenode ID vs list of (connector_id, 'presynaptic_to' or 'postsynaptic_to') tuples
"""
arbors2 = {} # Some arbors will be split further
for skeleton_id, graphs in arbors.iteritems():
subdomains = []
arbors2[skeleton_id] = subdomains
for graph in graphs:
treenode_ids = []
            connector_ids = []
relation_ids = []
for treenode_id in ifilter(treenode_connector.has_key, graph.nodes_iter()):
for c in treenode_connector.get(treenode_id):
connector_id, relation = c
treenode_ids.append(treenode_id)
connector_ids.append(connector_id)
relation_ids.append(relation)
if not connector_ids:
subdomains.append(graph)
continue
for parent_id, treenode_id in graph.edges_iter():
loc0 = locations[treenode_id]
loc1 = locations[parent_id]
graph[parent_id][treenode_id]['weight'] = norm(subtract(loc0, loc1))
# Invoke Casey's magic
synapse_group = tree_max_density(graph.to_undirected(), treenode_ids, connector_ids, relation_ids, [bandwidth]).values()[0]
# The list of nodes of each synapse_group contains only nodes that have connectors
# A local_max is the skeleton node most central to a synapse_group
anchors = {}
for domain in synapse_group.itervalues():
g = nx.DiGraph()
g.add_nodes_from(domain.node_ids) # bogus graph, containing treenodes that point to connectors
subdomains.append(g)
anchors[domain.local_max] = g
# Define edges between domains: create a simplified graph
mini = simplify(graph, anchors.keys())
# Replace each node by the corresponding graph, or a graph of a single node
for node in mini.nodes_iter():
g = anchors.get(node)
if not g:
# A branch node that was not an anchor, i.e. did not represent a synapse group
g = nx.Graph()
g.add_node(node, {'branch': True})
subdomains.append(g)
# Associate the Graph with treenodes that have connectors
# with the node in the minified tree
mini.node[node]['g'] = g
# Put the mini into a map of skeleton_id and list of minis,
# to be used later for defining intra-neuron edges in the circuit graph
minis[skeleton_id].append(mini)
return arbors2, minis
def _skeleton_graph(project_id, skeleton_ids, confidence_threshold, bandwidth, expand, compute_risk, cable_spread, path_confluence):
""" Assumes all skeleton_ids belong to project_id. """
skeletons_string = ",".join(str(int(x)) for x in skeleton_ids)
cursor = connection.cursor()
# Fetch all treenodes of all skeletons
cursor.execute('''
SELECT id, parent_id, confidence, skeleton_id,
location_x, location_y, location_z
FROM treenode
WHERE skeleton_id IN (%s)
''' % skeletons_string)
rows = tuple(cursor.fetchall())
# Each skeleton is represented with a DiGraph
arbors = defaultdict(nx.DiGraph)
# Get reviewers for the requested skeletons
reviews = get_treenodes_to_reviews(skeleton_ids=skeleton_ids)
# Create a DiGraph for every skeleton
for row in rows:
arbors[row[3]].add_node(row[0], {'reviewer_ids': reviews.get(row[0], [])})
# Dictionary of skeleton IDs vs list of DiGraph instances
arbors = split_by_confidence_and_add_edges(confidence_threshold, arbors, rows)
# Fetch all synapses
relations = get_relation_to_id_map(project_id, cursor=cursor)
cursor.execute('''
SELECT connector_id, relation_id, treenode_id, skeleton_id
FROM treenode_connector
WHERE skeleton_id IN (%s)
AND (relation_id = %s OR relation_id = %s)
''' % (skeletons_string, relations['presynaptic_to'], relations['postsynaptic_to']))
connectors = defaultdict(partial(defaultdict, list))
skeleton_synapses = defaultdict(partial(defaultdict, list))
for row in cursor.fetchall():
connectors[row[0]][row[1]].append((row[2], row[3]))
skeleton_synapses[row[3]][row[1]].append(row[2])
# Cluster by synapses
minis = defaultdict(list) # skeleton_id vs list of minified graphs
locations = None
whole_arbors = arbors
if expand and bandwidth > 0:
locations = {row[0]: (row[4], row[5], row[6]) for row in rows}
treenode_connector = defaultdict(list)
for connector_id, pp in connectors.iteritems():
for treenode_id in chain.from_iterable(pp[relations['presynaptic_to']]):
treenode_connector[treenode_id].append((connector_id, "presynaptic_to"))
for treenode_id in chain.from_iterable(pp[relations['postsynaptic_to']]):
treenode_connector[treenode_id].append((connector_id, "postsynaptic_to"))
arbors_to_expand = {skid: ls for skid, ls in arbors.iteritems() if skid in expand}
expanded_arbors, minis = split_by_synapse_domain(bandwidth, locations, arbors_to_expand, treenode_connector, minis)
arbors.update(expanded_arbors)
# Obtain neuron names
cursor.execute('''
SELECT cici.class_instance_a, ci.name
FROM class_instance ci,
class_instance_class_instance cici
WHERE cici.class_instance_a IN (%s)
AND cici.class_instance_b = ci.id
AND cici.relation_id = %s
''' % (skeletons_string, relations['model_of']))
names = dict(cursor.fetchall())
# A DiGraph representing the connections between the arbors (every node is an arbor)
circuit = nx.DiGraph()
for skid, digraphs in arbors.iteritems():
base_label = names[skid]
tag = len(digraphs) > 1
i = 0
for g in digraphs:
if g.number_of_nodes() == 0:
continue
if tag:
label = "%s [%s]" % (base_label, i+1)
else:
label = base_label
circuit.add_node(g, {'id': "%s_%s" % (skid, i+1),
'label': label,
'skeleton_id': skid,
'node_count': len(g),
'node_reviewed_count': sum(1 for v in g.node.itervalues() if 0 != len(v.get('reviewer_ids', []))), # TODO when bandwidth > 0, not all nodes are included. They will be included when the bandwidth is computed with an O(n) algorithm rather than the current O(n^2)
'branch': False})
i += 1
# Define edges between arbors, with number of synapses as an edge property
for c in connectors.itervalues():
for pre_treenode, pre_skeleton in c[relations['presynaptic_to']]:
for pre_arbor in arbors.get(pre_skeleton, ()):
if pre_treenode in pre_arbor:
# Found the DiGraph representing an arbor derived from the skeleton to which the presynaptic treenode belongs.
for post_treenode, post_skeleton in c[relations['postsynaptic_to']]:
for post_arbor in arbors.get(post_skeleton, ()):
if post_treenode in post_arbor:
# Found the DiGraph representing an arbor derived from the skeleton to which the postsynaptic treenode belongs.
edge_props = circuit.get_edge_data(pre_arbor, post_arbor)
if edge_props:
edge_props['c'] += 1
edge_props['pre_treenodes'].append(pre_treenode)
edge_props['post_treenodes'].append(post_treenode)
else:
circuit.add_edge(pre_arbor, post_arbor, {'c': 1, 'pre_treenodes': [pre_treenode], 'post_treenodes': [post_treenode], 'arrow': 'triangle', 'directed': True})
break
break
if compute_risk and bandwidth <= 0:
# Compute synapse risk:
# Compute synapse centrality of every node in every arbor that has synapses
for skeleton_id, arbors in whole_arbors.iteritems():
synapses = skeleton_synapses[skeleton_id]
pre = synapses[relations['presynaptic_to']]
post = synapses[relations['postsynaptic_to']]
for arbor in arbors:
# The subset of synapses that belong to the fraction of the original arbor
pre_sub = tuple(treenodeID for treenodeID in pre if treenodeID in arbor)
post_sub = tuple(treenodeID for treenodeID in post if treenodeID in arbor)
totalInputs = len(pre_sub)
totalOutputs = len(post_sub)
tc = {treenodeID: Counts() for treenodeID in arbor}
for treenodeID in pre_sub:
tc[treenodeID].outputs += 1
for treenodeID in post_sub:
tc[treenodeID].inputs += 1
# Update the nPossibleIOPaths field in the Counts instance of each treenode
_node_centrality_by_synapse(arbor, tc, totalOutputs, totalInputs)
arbor.treenode_synapse_counts = tc
if not locations:
locations = {row[0]: (row[4], row[5], row[6]) for row in rows}
# Estimate the risk factor of the edge between two arbors,
# as a function of the number of synapses and their location within the arbor.
# Algorithm by Casey Schneider-Mizell
# Implemented by Albert Cardona
for pre_arbor, post_arbor, edge_props in circuit.edges_iter(data=True):
if pre_arbor == post_arbor:
# Signal autapse
edge_props['risk'] = -2
continue
try:
spanning = spanning_tree(post_arbor, edge_props['post_treenodes'])
#for arbor in whole_arbors[circuit[post_arbor]['skeleton_id']]:
# if post_arbor == arbor:
# tc = arbor.treenode_synapse_counts
tc = post_arbor.treenode_synapse_counts
count = spanning.number_of_nodes()
if count < 3:
median_synapse_centrality = sum(tc[treenodeID].synapse_centrality for treenodeID in spanning.nodes_iter()) / count
else:
median_synapse_centrality = sorted(tc[treenodeID].synapse_centrality for treenodeID in spanning.nodes_iter())[count / 2]
cable = cable_length(spanning, locations)
if -1 == median_synapse_centrality:
# Signal not computable
edge_props['risk'] = -1
else:
edge_props['risk'] = 1.0 / sqrt(pow(cable / cable_spread, 2) + pow(median_synapse_centrality / path_confluence, 2)) # NOTE: should subtract 1 from median_synapse_centrality, but not doing it here to avoid potential divisions by zero
except Exception as e:
logging.getLogger(__name__).error(e)
# Signal error when computing
edge_props['risk'] = -3
if expand and bandwidth > 0:
# Add edges between circuit nodes that represent different domains of the same neuron
for skeleton_id, list_mini in minis.iteritems():
for mini in list_mini:
for node in mini.nodes_iter():
g = mini.node[node]['g']
if 1 == len(g) and g.nodes_iter(data=True).next()[1].get('branch'):
# A branch node that was preserved in the minified arbor
circuit.add_node(g, {'id': '%s-%s' % (skeleton_id, node),
'skeleton_id': skeleton_id,
'label': "", # "%s [%s]" % (names[skeleton_id], node),
'node_count': 1,
'branch': True})
for node1, node2 in mini.edges_iter():
g1 = mini.node[node1]['g']
g2 = mini.node[node2]['g']
circuit.add_edge(g1, g2, {'c': 10, 'arrow': 'none', 'directed': False})
return circuit
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeleton_graph(request, project_id=None):
project_id = int(project_id)
skeleton_ids = set(int(v) for k,v in request.POST.iteritems() if k.startswith('skeleton_list['))
confidence_threshold = int(request.POST.get('confidence_threshold', 0))
bandwidth = float(request.POST.get('bandwidth', 0)) # in nanometers
cable_spread = float(request.POST.get('cable_spread', 2500)) # in nanometers
path_confluence = int(request.POST.get('path_confluence', 10)) # a count
compute_risk = 1 == int(request.POST.get('risk', 0))
expand = set(int(v) for k,v in request.POST.iteritems() if k.startswith('expand['))
circuit = _skeleton_graph(project_id, skeleton_ids, confidence_threshold, bandwidth, expand, compute_risk, cable_spread, path_confluence)
package = {'nodes': [{'data': props} for props in circuit.node.itervalues()],
'edges': []}
edges = package['edges']
for g1, g2, props in circuit.edges_iter(data=True):
id1 = circuit.node[g1]['id']
id2 = circuit.node[g2]['id']
data = {'id': '%s_%s' % (id1, id2),
'source': id1,
'target': id2,
'weight': props['c'],
'label': str(props['c']) if props['directed'] else None,
'directed': props['directed'],
'arrow': props['arrow']}
if compute_risk:
data['risk'] = props.get('risk')
edges.append({'data': data})
return HttpResponse(json.dumps(package))
class Counts():
def __init__(self):
self.inputs = 0
self.outputs = 0
self.seenInputs = 0
self.seenOutputs = 0
self.nPossibleIOPaths = 0
self.synapse_centrality = 0
def _node_centrality_by_synapse_db(skeleton_id):
""" Compute the synapse centrality of every node in a tree.
Return the dictionary of node ID keys and Count values.
This function is meant for TESTING. """
cursor = connection.cursor()
cursor.execute('''
SELECT t.id, t.parent_id, r.relation_name
FROM treenode t LEFT OUTER JOIN (treenode_connector tc INNER JOIN relation r ON tc.relation_id = r.id) ON t.skeleton_id = tc.skeleton_id
WHERE t.skeleton_id = %s
''' % skeleton_id)
nodes = {} # node ID vs Counts
tree = nx.DiGraph()
root = None
totalInputs = 0
totalOutputs = 0
for row in cursor.fetchall():
counts = nodes.get(row[0])
if not counts:
counts = Counts()
nodes[row[0]] = counts
if row[2]:
if 'presynaptic_to' == row[2]:
counts.outputs += 1
totalOutputs += 1
elif 'postsynaptic_to' == row[2]:
counts.inputs += 1
totalInputs += 1
if row[1]:
tree.add_edge(row[0], row[1])
else:
root = row[0]
_node_centrality_by_synapse(tree, nodes, totalOutputs, totalInputs)
return nodes
def _node_centrality_by_synapse(tree, nodes, totalOutputs, totalInputs):
""" tree: a DiGraph
nodes: a dictionary of treenode ID vs Counts instance
totalOutputs: the total number of output synapses of the tree
totalInputs: the total number of input synapses of the tree
    Returns nothing; the results are written to the Counts instance of each treenode entry in nodes (nPossibleIOPaths and synapse_centrality). """
# 1. Ensure the root is an end by checking that it has only one child; otherwise reroot at the first end node found
if 0 == totalOutputs:
# Not computable
for counts in nodes.itervalues():
counts.synapse_centrality = -1
return
if len(tree.successors(find_root(tree))) > 1:
# Reroot at the first end node found
tree = tree.copy()
endNode = (nodeID for nodeID in nodes.iterkeys() if not tree.successors(nodeID)).next()
reroot(tree, endNode)
# 2. Partition into sequences, sorted from small to large
sequences = sorted(partition(tree), key=len)
# 3. Traverse all partitions counting synapses seen
for seq in sequences:
# Each seq runs from an end node towards the root or a branch node
seenI = 0
seenO = 0
for nodeID in seq:
counts = nodes[nodeID]
seenI += counts.inputs + counts.seenInputs
seenO += counts.outputs + counts.seenOutputs
counts.seenInputs = seenI
counts.seenOutputs = seenO
counts.nPossibleIOPaths = counts.seenInputs * (totalOutputs - counts.seenOutputs) + counts.seenOutputs * (totalInputs - counts.seenInputs)
counts.synapse_centrality = counts.nPossibleIOPaths / float(totalOutputs)
|
catsop/CATMAID
|
django/applications/catmaid/control/graph.py
|
Python
|
gpl-3.0
| 19,605
|
[
"NEURON"
] |
7818c0028bf262dc1f6c90dd0bf71ec17598144f9b98744ab13cea13206b7891
|
# -*- coding: utf-8 -*-
"""
Acceptance tests for CMS Video Module.
"""
from unittest import skipIf
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.video.video import VidoComponentPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import UniqueCourseTest, is_youtube_available
@skipIf(is_youtube_available() is False, 'YouTube is not available!')
class CMSVideoBaseTest(UniqueCourseTest):
"""
CMS Video Module Base Test Class
"""
def setUp(self):
"""
Initialization of pages and course fixture for tests
"""
super(CMSVideoBaseTest, self).setUp()
self.video = VidoComponentPage(self.browser)
# This will be initialized later
self.unit_page = None
self.outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
def _install_course_fixture(self):
"""
Prepare for tests by creating a course with a section, subsection, and unit.
Performs the following:
Create a course with a section, subsection, and unit
Create a user and make that user a course author
Log the user into studio
"""
# Create course with Video component
self.course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc('video', 'Video'),
)
)
)
).install()
# Auto login and register the course
AutoAuthPage(
self.browser,
staff=False,
username=self.course_fixture.user.get('username'),
email=self.course_fixture.user.get('email'),
password=self.course_fixture.user.get('password')
).visit()
def _navigate_to_course_unit_page(self):
"""
        Open the course from the dashboard, expand the section and subsection, and click on the Unit link.
        The end result is the page where the user is editing the newly created unit.
"""
# Visit Course Outline page
self.outline.visit()
# Visit Unit page
self.unit_page = self.outline.section('Test Section').subsection('Test Subsection').toggle_expand().unit(
'Test Unit').go_to()
self.video.wait_for_video_component_render()
def navigate_to_course_unit(self):
"""
Install the course with required components and navigate to course unit page
"""
self._install_course_fixture()
self._navigate_to_course_unit_page()
def edit_component(self):
"""
        Make the component editable and open the component's Edit Dialog.
"""
self.unit_page.set_unit_visibility('private')
self.unit_page.components[0].edit()
def open_advanced_tab(self):
"""
Open components advanced tab.
"""
self.unit_page.components[0].open_advanced_tab()
def save_unit_settings(self):
"""
Save component settings.
"""
self.unit_page.components[0].save_settings()
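# Hedged sketch of how a concrete test case might build on the helpers above
# (illustrative only; the class and test names below are ours, not part of the
# real CMS video test suite):
#
#     class CMSVideoSmokeTest(CMSVideoBaseTest):
#         def test_video_component_renders(self):
#             self.navigate_to_course_unit()
#             self.edit_component()
#             self.open_advanced_tab()
#             self.save_unit_settings()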
|
LICEF/edx-platform
|
common/test/acceptance/tests/video/test_studio_video_module.py
|
Python
|
agpl-3.0
| 3,784
|
[
"VisIt"
] |
5393f69a550781a4d3a12e0b915f88065ff4ada5e6f9e81559d0e62bacac9d51
|
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
import module_utils
import vtk
import numpy
class FitEllipsoidToMask(NoConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
# initialise our base class
ModuleBase.__init__(self, module_manager)
self._input_data = None
self._output_dict = {}
# polydata pipeline to make crosshairs
self._ls1 = vtk.vtkLineSource()
self._ls2 = vtk.vtkLineSource()
self._ls3 = vtk.vtkLineSource()
self._append_pd = vtk.vtkAppendPolyData()
self._append_pd.AddInput(self._ls1.GetOutput())
self._append_pd.AddInput(self._ls2.GetOutput())
self._append_pd.AddInput(self._ls3.GetOutput())
NoConfigModuleMixin.__init__(
self, {'Module (self)' : self})
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
NoConfigModuleMixin.close(self)
def get_input_descriptions(self):
return ('VTK image data',)
def set_input(self, idx, input_stream):
self._input_data = input_stream
def get_output_descriptions(self):
return ('Ellipsoid (eigen-analysis) parameters', 'Crosshairs polydata')
def get_output(self, idx):
if idx == 0:
return self._output_dict
else:
return self._append_pd.GetOutput()
def execute_module(self):
ii = self._input_data
if not ii:
return
# now we need to iterate through the whole input data
ii.Update()
iorigin = ii.GetOrigin()
ispacing = ii.GetSpacing()
maxx, maxy, maxz = ii.GetDimensions()
numpoints = 0
points = []
for z in range(maxz):
wz = z * ispacing[2] + iorigin[2]
for y in range(maxy):
wy = y * ispacing[1] + iorigin[1]
for x in range(maxx):
v = ii.GetScalarComponentAsDouble(x,y,z,0)
if v > 0.0:
wx = x * ispacing[0] + iorigin[0]
points.append((wx,wy,wz))
# covariance matrix ##############################
if len(points) == 0:
self._output_dict.update({'u' : None, 'v' : None, 'c' : None,
'axis_lengths' : None,
'radius_vectors' : None})
return
# determine centre (x,y,z)
points2 = numpy.array(points)
centre = numpy.average(points2, 0)
cx,cy,cz = centre
# subtract centre from all points
points_c = points2 - centre
covariance = numpy.cov(points_c.transpose())
# eigen-analysis (u eigenvalues, v eigenvectors)
u,v = numpy.linalg.eig(covariance)
# estimate length at 2.0 * standard deviation in both directions
axis_lengths = [4.0 * numpy.sqrt(eigval) for eigval in u]
radius_vectors = numpy.zeros((3,3), float)
for i in range(3):
radius_vectors[i] = v[i] * axis_lengths[i] / 2.0
self._output_dict.update({'u' :u, 'v' : v, 'c' : (cx,cy,cz),
'axis_lengths' : tuple(axis_lengths),
'radius_vectors' : radius_vectors})
# now modify output polydata #########################
lss = [self._ls1, self._ls2, self._ls3]
for i in range(len(lss)):
half_axis = radius_vectors[i] #axis_lengths[i] / 2.0 * v[i]
ca = numpy.array((cx,cy,cz))
lss[i].SetPoint1(ca - half_axis)
lss[i].SetPoint2(ca + half_axis)
self._append_pd.Update()
def pca(points):
"""PCA factored out of execute_module and made N-D. not being used yet.
points is a list of M N-d tuples.
returns eigenvalues (u), eigenvectors (v), axis_lengths and
radius_vectors.
"""
# for a list of M N-d tuples, returns an array with M rows and N
# columns
points2 = numpy.array(points)
# determine centre by averaging over 0th axis (over rows)
centre = numpy.average(points2, 0)
# subtract centre from all points
points_c = points2 - centre
covariance = numpy.cov(points_c.transpose())
# eigen-analysis (u eigenvalues, v eigenvectors)
u,v = numpy.linalg.eig(covariance)
# estimate length at 2.0 * standard deviation in both directions
axis_lengths = [4.0 * numpy.sqrt(eigval) for eigval in u]
N = len(u)
radius_vectors = numpy.zeros((N,N), float)
for i in range(N):
radius_vectors[i] = v[i] * axis_lengths[i] / 2.0
output_dict = {'u' :u, 'v' : v, 'c' : centre,
'axis_lengths' : tuple(axis_lengths),
'radius_vectors' : radius_vectors}
return output_dict
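# Hedged usage sketch of the standalone pca() helper above (illustrative only;
# not wired into the DeVIDE module pipeline). Feeds a small synthetic 3-D point
# cloud to pca() and prints the fitted centre and axis lengths.
if __name__ == '__main__':
    demo_points = [(float(x), float(y), 0.0)
                   for x in range(-2, 3) for y in range(-1, 2)]
    demo_result = pca(demo_points)
    print('centre: %s' % (demo_result['c'],))
    print('axis lengths: %s' % (demo_result['axis_lengths'],))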
|
nagyistoce/devide
|
modules/filters/FitEllipsoidToMask.py
|
Python
|
bsd-3-clause
| 5,248
|
[
"VTK"
] |
cc0b04124e14a6d29e158c2aab6370b657c91d9b812e01c0dcb513ef7a6d8343
|
"""Validate dependencies."""
import ast
from pathlib import Path
from typing import Dict, Set
from homeassistant.requirements import DISCOVERY_INTEGRATIONS
from .model import Integration
class ImportCollector(ast.NodeVisitor):
"""Collect all integrations referenced."""
def __init__(self, integration: Integration):
"""Initialize the import collector."""
self.integration = integration
self.referenced: Dict[Path, Set[str]] = {}
# Current file or dir we're inspecting
self._cur_fil_dir = None
def collect(self) -> None:
"""Collect imports from a source file."""
for fil in self.integration.path.glob("**/*.py"):
if not fil.is_file():
continue
self._cur_fil_dir = fil.relative_to(self.integration.path)
self.referenced[self._cur_fil_dir] = set()
self.visit(ast.parse(fil.read_text()))
self._cur_fil_dir = None
def _add_reference(self, reference_domain: str):
"""Add a reference."""
self.referenced[self._cur_fil_dir].add(reference_domain)
def visit_ImportFrom(self, node):
"""Visit ImportFrom node."""
if node.module is None:
return
if node.module.startswith("homeassistant.components."):
# from homeassistant.components.alexa.smart_home import EVENT_ALEXA_SMART_HOME
# from homeassistant.components.logbook import bla
self._add_reference(node.module.split(".")[2])
elif node.module == "homeassistant.components":
# from homeassistant.components import sun
for name_node in node.names:
self._add_reference(name_node.name)
def visit_Import(self, node):
"""Visit Import node."""
# import homeassistant.components.hue as hue
for name_node in node.names:
if name_node.name.startswith("homeassistant.components."):
self._add_reference(name_node.name.split(".")[2])
def visit_Attribute(self, node):
"""Visit Attribute node."""
# hass.components.hue.async_create()
# Name(id=hass)
# .Attribute(attr=hue)
# .Attribute(attr=async_create)
# self.hass.components.hue.async_create()
# Name(id=self)
# .Attribute(attr=hass)
# .Attribute(attr=hue)
# .Attribute(attr=async_create)
if (
isinstance(node.value, ast.Attribute)
and node.value.attr == "components"
and (
(
isinstance(node.value.value, ast.Name)
and node.value.value.id == "hass"
)
or (
isinstance(node.value.value, ast.Attribute)
and node.value.value.attr == "hass"
)
)
):
self._add_reference(node.attr)
else:
# Have it visit other kids
self.generic_visit(node)
ALLOWED_USED_COMPONENTS = {
# This component will always be set up
"persistent_notification",
# These allow to register things without being set up
"conversation",
"frontend",
"hassio",
"system_health",
"websocket_api",
"automation",
"device_automation",
"zone",
"homeassistant",
"system_log",
"person",
# Discovery
"discovery",
# Other
"mjpeg", # base class, has no reqs or component to load.
"stream", # Stream cannot install on all systems, can be imported without reqs.
}
IGNORE_VIOLATIONS = [
# Has same requirement, gets defaults.
("sql", "recorder"),
# Sharing a base class
("openalpr_cloud", "openalpr_local"),
("lutron_caseta", "lutron"),
("ffmpeg_noise", "ffmpeg_motion"),
# Demo
("demo", "manual"),
("demo", "openalpr_local"),
# This should become a helper method that integrations can submit data to
("websocket_api", "lovelace"),
# Expose HA to external systems
"homekit",
"alexa",
"google_assistant",
"emulated_hue",
"prometheus",
"conversation",
"logbook",
"mobile_app",
# These should be extracted to external package
"pvoutput",
"dwd_weather_warnings",
# Should be rewritten to use own data fetcher
"scrape",
]
def calc_allowed_references(integration: Integration) -> Set[str]:
"""Return a set of allowed references."""
allowed_references = (
ALLOWED_USED_COMPONENTS
| set(integration.manifest["dependencies"])
| set(integration.manifest.get("after_dependencies", []))
)
# Discovery requirements are ok if referenced in manifest
for check_domain, to_check in DISCOVERY_INTEGRATIONS.items():
if any(check in integration.manifest for check in to_check):
allowed_references.add(check_domain)
return allowed_references
def find_non_referenced_integrations(
integrations: Dict[str, Integration],
integration: Integration,
references: Dict[Path, Set[str]],
):
    """Find integrations that are not allowed to be referenced."""
allowed_references = calc_allowed_references(integration)
referenced = set()
for path, refs in references.items():
if len(path.parts) == 1:
# climate.py is stored as climate
cur_fil_dir = path.stem
else:
# climate/__init__.py is stored as climate
cur_fil_dir = path.parts[0]
is_platform_other_integration = cur_fil_dir in integrations
for ref in refs:
# We are always allowed to import from ourselves
if ref == integration.domain:
continue
# These references are approved based on the manifest
if ref in allowed_references:
continue
# Some violations are whitelisted
if (integration.domain, ref) in IGNORE_VIOLATIONS:
continue
# If it's a platform for another integration, the other integration is ok
if is_platform_other_integration and cur_fil_dir == ref:
continue
# These have a platform specified in this integration
if not is_platform_other_integration and (
(integration.path / f"{ref}.py").is_file()
# Platform dir
or (integration.path / ref).is_dir()
):
continue
referenced.add(ref)
return referenced
def validate_dependencies(
integrations: Dict[str, Integration], integration: Integration
):
"""Validate all dependencies."""
# Some integrations are allowed to have violations.
if integration.domain in IGNORE_VIOLATIONS:
return
# Find usage of hass.components
collector = ImportCollector(integration)
collector.collect()
for domain in sorted(
find_non_referenced_integrations(
integrations, integration, collector.referenced
)
):
integration.add_error(
"dependencies",
f"Using component {domain} but it's not in 'dependencies' "
"or 'after_dependencies'",
)
def validate(integrations: Dict[str, Integration], config):
"""Handle dependencies for integrations."""
# check for non-existing dependencies
for integration in integrations.values():
if not integration.manifest:
continue
validate_dependencies(integrations, integration)
# check that all referenced dependencies exist
for dep in integration.manifest["dependencies"]:
if dep not in integrations:
integration.add_error(
"dependencies", f"Dependency {dep} does not exist"
)
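# Hedged, self-contained sketch of the import-collection idea implemented by
# ImportCollector above: parse a source snippet and report which
# homeassistant.components.* domains it references. Illustrative only; the
# helper name is ours and it is not used by hassfest.
def _demo_collect_component_imports(source: str) -> Set[str]:
    referenced: Set[str] = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.ImportFrom) and node.module:
            if node.module.startswith("homeassistant.components."):
                referenced.add(node.module.split(".")[2])
            elif node.module == "homeassistant.components":
                referenced.update(alias.name for alias in node.names)
        elif isinstance(node, ast.Import):
            for alias in node.names:
                if alias.name.startswith("homeassistant.components."):
                    referenced.add(alias.name.split(".")[2])
    return referenced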
|
Teagan42/home-assistant
|
script/hassfest/dependencies.py
|
Python
|
apache-2.0
| 7,786
|
[
"VisIt"
] |
57ef5e0d4e8bcc61a4e2e1eb90fc1894cb1854be1f98b73f281ef06b07a48b6a
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# perses documentation build configuration file, created by
# sphinx-quickstart on Sun May 14 17:09:48 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'numpydoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
#'sphinx.ext.githubpages'
]
autosummary_generate = True
autodoc_default_options = {
"members": True,
"inherited-members": True,
}
# autodoc_mock_imports = [
# "matplotlib",
# "mdtraj",
# "netCDF4",
# "networkx",
# "oechem",
# "openeye",
# "openforcefield",
# "openmmtools",
# "openmoltools",
# "parmed",
# "progressbar",
# "pymbar",
# "scipy",
# "seaborn",
# "simtk",
# "tqdm",
# # "numba", # this must be installed because mocking fails with jit stuff
# # "perses.rjmc.coordinate_numba",
# ]
numpydoc_class_members_toctree = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'perses'
copyright = '2016-2017, Chodera lab // MSKCC'
author = 'Chodera lab // MSKCC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
pygments_style = 'paraiso-dark'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"logo_name": 'true',
'page_width': 'auto',
'github_button': 'true',
'github_user': 'choderalab',
'github_repo': 'perses',
'github_banner': 'true',
'travis_button': 'false',
'show_powered_by' :'true',
'font_family': 'calibri, helvetica, sans-serif',
'head_font_family': 'cambria, tahoma, serif',
'description' : 'A batteries-included toolkit for the GPU-accelerated OpenMM molecular simulation engine',
'pre_bg': '#41323f', # color used for syntax block
'body_text': "#41323f",
"pink_1": "peachpuff",
}
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'persesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'perses.tex', 'perses Documentation',
'Chodera lab // MSKCC', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'perses', 'perses Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'perses', 'perses Documentation',
author, 'perses', 'One line description of project.',
'Miscellaneous'),
]
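# Hedged build note (not part of the Sphinx configuration itself): with sphinx,
# numpydoc and the alabaster theme installed, the HTML docs can typically be
# built from the repository root with something like:
#
#     sphinx-build -b html docs docs/_build/html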
|
choderalab/perses
|
docs/conf.py
|
Python
|
mit
| 6,395
|
[
"MDTraj",
"OpenMM"
] |
89157566599886584ca100e99eebb015292c6b49acb66abb6ea11b2db74f9d68
|
# Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal)
from numpy import linspace, sin, cos, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
x = random.rand(50, 1)*4 - 2
y = random.rand(50, 1)*4 - 2
z = random.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
check_rbf1d_interpolation(function)
check_rbf2d_interpolation(function)
check_rbf3d_interpolation(function)
def check_2drbf1d_interpolation(function):
# Check that the 2-D Rbf function interpolates through the nodes (1D)
x = linspace(0, 10, 9)
y0 = sin(x)
y1 = cos(x)
y = np.vstack([y0, y1]).T
rbf = Rbf(x, y, function=function, mode='N-D')
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_2drbf2d_interpolation(function):
# Check that the 2-D Rbf function interpolates through the nodes (2D).
x = random.rand(50, ) * 4 - 2
y = random.rand(50, ) * 4 - 2
z0 = x * exp(-x ** 2 - 1j * y ** 2)
z1 = y * exp(-y ** 2 - 1j * x ** 2)
z = np.vstack([z0, z1]).T
rbf = Rbf(x, y, z, epsilon=2, function=function, mode='N-D')
zi = rbf(x, y)
zi.shape = z.shape
assert_array_almost_equal(z, zi)
def check_2drbf3d_interpolation(function):
# Check that the 2-D Rbf function interpolates through the nodes (3D).
x = random.rand(50, ) * 4 - 2
y = random.rand(50, ) * 4 - 2
z = random.rand(50, ) * 4 - 2
d0 = x * exp(-x ** 2 - y ** 2)
d1 = y * exp(-y ** 2 - x ** 2)
d = np.vstack([d0, d1]).T
rbf = Rbf(x, y, z, d, epsilon=2, function=function, mode='N-D')
di = rbf(x, y, z)
di.shape = d.shape
assert_array_almost_equal(di, d)
def test_2drbf_interpolation():
for function in FUNCTIONS:
check_2drbf1d_interpolation(function)
check_2drbf2d_interpolation(function)
check_2drbf3d_interpolation(function)
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
check_rbf1d_regularity(function, tolerances.get(function, 1e-2))
def check_2drbf1d_regularity(function, atol):
# Check that the 2-D Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y0 = sin(x)
y1 = cos(x)
y = np.vstack([y0, y1]).T
rbf = Rbf(x, y, function=function, mode='N-D')
xi = linspace(0, 10, 100)
yi = rbf(xi)
msg = "abs-diff: %f" % abs(yi - np.vstack([sin(xi), cos(xi)]).T).max()
assert_(allclose(yi, np.vstack([sin(xi), cos(xi)]).T, atol=atol), msg)
def test_2drbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.15,
'linear': 0.2
}
for function in FUNCTIONS:
check_2drbf1d_regularity(function, tolerances.get(function, 1e-2))
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
np.random.seed(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * np.random.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
# subtract the linear trend and make sure there no spikes
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
def test_rbf_stability():
for function in FUNCTIONS:
check_rbf1d_stability(function)
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
Rbf(x, y, epsilon=None)
def test_rbf_epsilon_none_collinear():
# Check that collinear points in one dimension doesn't cause an error
# due to epsilon = 0
x = [1, 2, 3]
y = [4, 4, 4]
z = [5, 6, 7]
rbf = Rbf(x, y, z, epsilon=None)
assert_(rbf.epsilon > 0)
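# Hedged usage sketch (not a test, and not collected by the test runner):
# interpolate scattered 1-D samples onto a finer grid with the default
# multiquadric basis, mirroring the patterns exercised by the checks above.
def _example_rbf_usage():
    x = linspace(0, 10, 9)
    y = sin(x)
    rbf = Rbf(x, y)            # default: multiquadric basis, epsilon estimated
    xi = linspace(0, 10, 101)
    return xi, rbf(xi)         # interpolated values on the finer grid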
|
WarrenWeckesser/scipy
|
scipy/interpolate/tests/test_rbf.py
|
Python
|
bsd-3-clause
| 6,546
|
[
"Gaussian"
] |
8c4d4bcae9bd7cbce0b60d74fc15627c40ae44aa96735414388a643fd8943b91
|
from ase import *
a = 3.6
b = a / 2
fcc = Atoms('Cu', positions=[(0, 0, 0)],
cell=[(0, b, b), (b, 0, b), (b, b, 0)],
pbc=1)
fcc *= (2, 1, 1)
fcc.set_calculator(EMT())
fcc.set_momenta([(0.9, 0.0, 0.0), (-0.9, 0, 0)])
md = VelocityVerlet(fcc, dt=0.1)
def f():
print fcc.get_potential_energy(), fcc.get_total_energy()
md.attach(f)
md.attach(PickleTrajectory('Cu2.traj', 'w', fcc).write, interval=3)
md.run(steps=20)
fcc2 = PickleTrajectory('Cu2.traj', 'r')[-1]
|
freephys/python_ase
|
ase/test/md.py
|
Python
|
gpl-3.0
| 487
|
[
"ASE"
] |
807b39deefc93a7239e447f4a1ddab9db56690602fbee2f5b820b1ea1fe6baaa
|
# dataset.py
"""Module for Dataset class
Overview of Dicom object model:
Dataset(derived class of Python's dict class)
contains DataElement instances (DataElement is a class with tag, VR, value)
the value can be a Sequence instance
(Sequence is derived from Python's list),
or just a regular value like a number, string, etc.,
or a list of regular values, e.g. a 3d coordinate
Sequence's are a list of Datasets (note recursive nature here)
"""
#
# Copyright (c) 2008-2013 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
#
import sys
import inspect # for __dir__
import os.path
import io
from pydicom import compat
from pydicom.charset import default_encoding, convert_encodings
from pydicom.datadict import dictionaryVR
from pydicom.datadict import tag_for_name, all_names_for_tag
from pydicom.tag import Tag, BaseTag
from pydicom.dataelem import DataElement, DataElement_from_raw, RawDataElement
from pydicom.uid import NotCompressedPixelTransferSyntaxes, UncompressedPixelTransferSyntaxes
from pydicom.tagtools import tag_in_exception
import pydicom # for write_file
import pydicom.charset
from pydicom.config import logger
import pydicom.encaps
sys_is_little_endian = (sys.byteorder == 'little')
have_numpy = True
try:
import numpy
except ImportError:
have_numpy = False
have_gdcm = True
try:
import gdcm
except ImportError:
have_gdcm = False
stat_available = True
try:
from os import stat
except ImportError:
stat_available = False
have_jpeg_ls = True
try:
import jpeg_ls
except ImportError:
have_jpeg_ls = False
have_pillow = True
try:
from PIL import Image as PILImg
except ImportError:
have_pillow = False
# If that failed, try the alternate import syntax for PIL.
try:
import Image as PILImg
except ImportError:
# Neither worked, so it's likely not installed.
have_pillow = False
class PropertyError(Exception):
"""For AttributeErrors caught in a property, so do not go to __getattr__"""
# http://docs.python.org/release/3.1.3/tutorial/errors.html#tut-userexceptions
pass
class Dataset(dict):
"""A collection (dictionary) of Dicom `DataElement` instances.
Example of two ways to retrieve or set values:
1. dataset[0x10, 0x10].value --> patient's name
2. dataset.PatientName --> patient's name
Example (2) uses DICOM "keywords", defined starting in 2011 standard.
PatientName is not actually a member of the object, but unknown member
requests are checked against the DICOM dictionary. If the name matches a
DicomDictionary descriptive string, the corresponding tag is used
to look up or set the `DataElement` instance's value.
:attribute indent_chars: for string display, the characters used to indent
nested Data Elements (e.g. sequence items). Default is three spaces.
"""
    indent_chars = "   "
# Python 2: Classes which define __eq__ should flag themselves as unhashable
__hash__ = None
def __init__(self, *args, **kwargs):
self._parent_encoding = kwargs.get('parent_encoding', default_encoding)
dict.__init__(self, *args)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
def add(self, data_element):
"""Equivalent to dataset[data_element.tag] = data_element."""
self[data_element.tag] = data_element
def add_new(self, tag, VR, value):
"""Create a new DataElement instance and add it to this Dataset."""
data_element = DataElement(tag, VR, value)
# use data_element.tag since DataElement verified it
self[data_element.tag] = data_element
def data_element(self, name):
"""Return the full data_element instance for the given descriptive name.
Parameters
----------
name: str
A DICOM keyword
Returns
-------
DataElement instance or None
Returns a DataElement instance in this dataset with the given name.
If the tag for that name is not found, returns None.
"""
tag = tag_for_name(name)
if tag:
return self[tag]
return None
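    # Hedged usage sketch for the access patterns described in the class
    # docstring (illustrative only; the file name is a placeholder and the
    # snippet assumes a DICOM file readable by pydicom.read_file):
    #
    #     ds = pydicom.read_file("ct_slice.dcm")
    #     ds.PatientName                      # keyword access via __getattr__
    #     ds[0x10, 0x10].value                # tag access via __getitem__
    #     ds.data_element("PatientName").VR   # full DataElement for a keyword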
def __contains__(self, name):
"""Extend dict.__contains__() to handle DICOM keywords.
This is called for code like: ``if 'SliceLocation' in dataset``.
"""
if isinstance(name, (str, compat.text_type)):
tag = tag_for_name(name)
else:
try:
tag = Tag(name)
except:
return False
if tag:
return dict.__contains__(self, tag)
else:
return dict.__contains__(self, name) # will no doubt raise an exception
def decode(self):
"""Apply character set decoding to all data elements.
See DICOM PS3.5-2008 6.1.1.
"""
# Find specific character set. 'ISO_IR 6' is default
# May be multi-valued, but let pydicom.charset handle all logic on that
dicom_character_set = self._character_set
# Shortcut to the decode function in pydicom.charset
decode_data_element = pydicom.charset.decode
# Callback for walk(), to decode the chr strings if necessary
# This simply calls the pydicom.charset.decode function
def decode_callback(ds, data_element):
if data_element.VR == 'SQ':
[dset.decode() for dset in data_element.value]
else:
decode_data_element(data_element, dicom_character_set)
self.walk(decode_callback, recursive=False)
def __delattr__(self, name):
"""Intercept requests to delete an attribute by name, e.g. del ds.name
If name is a DICOM keyword, then delete the corresponding tag
and data_element. Else, delete an instance (python) attribute
as any other class would do
"""
# First check if a valid DICOM keyword and if we have that data element
tag = tag_for_name(name)
if tag is not None and tag in self:
dict.__delitem__(self, tag) # direct to dict as we know we have key
# If not a DICOM name in this dataset, check for regular instance name
# can't do delete directly, that will call __delattr__ again
elif name in self.__dict__:
del self.__dict__[name]
# Not found, raise an error in same style as python does
else:
raise AttributeError(name)
def __delitem__(self, key):
"""Intercept requests to delete an attribute by key, e.g. del ds[tag]"""
# Assume is a standard tag (for speed in common case)
try:
dict.__delitem__(self, key)
# If not a standard tag, than convert to Tag and try again
except KeyError:
tag = Tag(key)
dict.__delitem__(self, tag)
def __dir__(self):
"""Give a list of attributes available in the dataset
List of attributes is used, for example, in auto-completion in editors
or command-line environments.
"""
# Force zip object into a list in case of python3. Also backwards
# compatible
meths = set(list(zip(
*inspect.getmembers(Dataset, inspect.isroutine)))[0])
props = set(list(zip(
*inspect.getmembers(Dataset, inspect.isdatadescriptor)))[0])
dicom_names = set(self.dir())
alldir = sorted(props | meths | dicom_names)
return alldir
def dir(self, *filters):
"""Return an alphabetical list of data_element keywords in the dataset.
Intended mainly for use in interactive Python sessions.
Parameters
----------
filters : str
Zero or more string arguments to the function. Used for
case-insensitive match to any part of the DICOM name.
Returns
-------
All data_element names in this dataset matching the filters.
If no filters, return all DICOM keywords in the dataset.
"""
allnames = []
for tag, data_element in self.items():
allnames.extend(all_names_for_tag(tag))
# remove blanks - tags without valid names (e.g. private tags)
allnames = [x for x in allnames if x]
# Store found names in a dict, so duplicate names appear only once
matches = {}
for filter_ in filters:
filter_ = filter_.lower()
match = [x for x in allnames if x.lower().find(filter_) != -1]
matches.update(dict([(x, 1) for x in match]))
if filters:
names = sorted(matches.keys())
return names
else:
return sorted(allnames)
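    # Illustrative example (hypothetical dataset contents):
    #     >>> ds.dir()        # every keyword present, sorted alphabetically
    #     ['BitsAllocated', 'PatientID', 'PatientName', 'Rows']
    #     >>> ds.dir('pat')   # case-insensitive substring match
    #     ['PatientID', 'PatientName']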
def __eq__(self, other):
"""
Compare `self` and `other` for equality
Returns
-------
bool
The result if `self` and `other` are the same class
NotImplemented
If `other` is not the same class as `self` then returning
NotImplemented delegates the result to superclass.__eq__(subclass)
"""
# When comparing against self this will be faster
if other is self:
return True
if isinstance(other, self.__class__):
# Compare Elements using values() and class variables using __dict__
# Convert values() to a list for compatibility between python 2 and 3
return (list(self.values()) == list(other.values())) and (self.__dict__ == other.__dict__)
return NotImplemented
def get(self, key, default=None):
"""Extend dict.get() to handle DICOM keywords"""
if isinstance(key, (str, compat.text_type)):
try:
return getattr(self, key)
except AttributeError:
return default
else:
# is not a string, try to make it into a tag and then hand it
# off to the underlying dict
if not isinstance(key, BaseTag):
try:
key = Tag(key)
                except Exception:
raise TypeError("Dataset.get key must be a string or tag")
try:
return_val = self.__getitem__(key)
except KeyError:
return_val = default
return return_val
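    # Illustrative example (hypothetical values): both the keyword form and
    # the tag form fall back to the default when the element is absent:
    #     >>> ds.get('PatientName', 'anonymous')
    #     'anonymous'
    #     >>> ds.get((0x0010, 0x0010))    # returns None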
def __getattr__(self, name):
"""Intercept requests for unknown Dataset python-attribute names.
If the name matches a Dicom keyword,
return the value for the data_element with the corresponding tag.
"""
# __getattr__ only called if instance cannot find name in self.__dict__
        # So, if name is not a DICOM keyword string, then it is an error
tag = tag_for_name(name)
if tag is None:
raise AttributeError("Dataset does not have attribute "
"'{0:s}'.".format(name))
tag = Tag(tag)
if tag not in self:
raise AttributeError("Dataset does not have attribute "
"'{0:s}'.".format(name))
else: # do have that dicom data_element
return self[tag].value
@property
def _character_set(self):
char_set = self.get('SpecificCharacterSet', None)
if not char_set:
char_set = self._parent_encoding
else:
char_set = convert_encodings(char_set)
return char_set
def __getitem__(self, key):
"""Operator for dataset[key] request."""
tag = Tag(key)
data_elem = dict.__getitem__(self, tag)
if isinstance(data_elem, DataElement):
return data_elem
elif isinstance(data_elem, tuple):
# If a deferred read, then go get the value now
if data_elem.value is None:
from pydicom.filereader import read_deferred_data_element
data_elem = read_deferred_data_element(self.fileobj_type,
self.filename, self.timestamp, data_elem)
if tag != (0x08, 0x05):
character_set = self._character_set
else:
character_set = default_encoding
# Not converted from raw form read from file yet; do so now
self[tag] = DataElement_from_raw(data_elem, character_set)
return dict.__getitem__(self, tag)
def get_item(self, key):
"""Return the raw data element if possible.
        The element will still be in raw form if the user has never accessed
        its value nor set a value of their own.
Note if the data element is a deferred-read element,
then it is read and converted before being returned
"""
tag = Tag(key)
data_elem = dict.__getitem__(self, tag)
# If a deferred read, return using __getitem__ to read and convert it
if isinstance(data_elem, tuple) and data_elem.value is None:
return self[key]
return data_elem
def group_dataset(self, group):
"""Return a Dataset containing only data_elements of a certain group.
Parameters
----------
group : the group part of a dicom (group, element) tag.
Returns
-------
A dataset instance containing data elements of the group specified.
"""
ds = Dataset()
ds.update(dict([(tag, data_element) for tag, data_element in self.items()
if tag.group == group]))
return ds
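    # Illustrative example (hypothetical): collect the patient-module
    # elements, i.e. all elements whose tag group is 0x0010:
    #     >>> patient_ds = ds.group_dataset(0x0010)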
def __iter__(self):
"""Method to iterate through the dataset, returning data_elements.
e.g.:
for data_element in dataset:
do_something...
The data_elements are returned in DICOM order,
i.e. in increasing order by tag value.
Sequence items are returned as a single data_element; it is up to the
calling code to recurse into the Sequence items if desired
"""
# Note this is different than the underlying dict class,
# which returns the key of the key:value mapping.
# Here the value is returned (but data_element.tag has the key)
taglist = sorted(self.keys())
for tag in taglist:
yield self[tag]
def _is_uncompressed_transfer_syntax(self):
# FIXME uses file_meta here, should really only be thus for FileDataset
return self.file_meta.TransferSyntaxUID in NotCompressedPixelTransferSyntaxes
def __ne__(self, other):
""" Compare `self` and `other` for inequality """
return not (self == other)
def _pixel_data_numpy(self):
"""Return a NumPy array of the pixel data if NumPy is available.
Falls back to GDCM in case of unsupported transfer syntaxes.
Raises
------
TypeError
If there is no pixel data or not a supported data type
ImportError
If NumPy isn't found, or in the case of fallback, if GDCM isn't found.
Returns
-------
NumPy array
"""
if not self._is_uncompressed_transfer_syntax():
if not have_gdcm:
raise NotImplementedError("Pixel Data is compressed in a format pydicom does not yet handle. Cannot return array. Pydicom might be able to convert the pixel data using GDCM if it is installed.")
elif not self.filename:
raise NotImplementedError("GDCM is only supported when the dataset has been created with a filename.")
if not have_numpy:
msg = "The Numpy package is required to use pixel_array, and numpy could not be imported."
raise ImportError(msg)
if 'PixelData' not in self:
raise TypeError("No pixel data found in this dataset.")
# There are two cases:
# 1) uncompressed PixelData -> use numpy
# 2) compressed PixelData, filename is available and GDCM is available -> use GDCM
if self._is_uncompressed_transfer_syntax():
# Make NumPy format code, e.g. "uint16", "int32" etc
# from two pieces of info:
# self.PixelRepresentation -- 0 for unsigned, 1 for signed;
# self.BitsAllocated -- 8, 16, or 32
format_str = '%sint%d' % (('u', '')[self.PixelRepresentation],
self.BitsAllocated)
try:
numpy_dtype = numpy.dtype(format_str)
except TypeError:
msg = ("Data type not understood by NumPy: "
"format='%s', PixelRepresentation=%d, BitsAllocated=%d")
raise TypeError(msg % (format_str, self.PixelRepresentation,
self.BitsAllocated))
if self.is_little_endian != sys_is_little_endian:
numpy_dtype = numpy_dtype.newbyteorder('S')
pixel_bytearray = self.PixelData
elif have_gdcm and self.filename:
# read the file using GDCM
# FIXME this should just use self.PixelData instead of self.filename
# but it is unclear how this should be achieved using GDCM
gdcm_image_reader = gdcm.ImageReader()
gdcm_image_reader.SetFileName(self.filename)
if not gdcm_image_reader.Read():
raise TypeError("GDCM could not read DICOM image")
gdcm_image = gdcm_image_reader.GetImage()
# determine the correct numpy datatype
gdcm_numpy_typemap = {
gdcm.PixelFormat.INT8: numpy.int8,
gdcm.PixelFormat.UINT8: numpy.uint8,
gdcm.PixelFormat.UINT16: numpy.uint16,
gdcm.PixelFormat.INT16: numpy.int16,
gdcm.PixelFormat.UINT32: numpy.uint32,
gdcm.PixelFormat.INT32: numpy.int32,
gdcm.PixelFormat.FLOAT32: numpy.float32,
gdcm.PixelFormat.FLOAT64: numpy.float64
}
gdcm_pixel_format = gdcm_image.GetPixelFormat().GetScalarType()
if gdcm_pixel_format in gdcm_numpy_typemap:
numpy_dtype = gdcm_numpy_typemap[gdcm_pixel_format]
else:
raise TypeError('{0} is not a GDCM supported pixel format'.format(gdcm_pixel_format))
# GDCM returns char* as type str. Under Python 2 `str` are
# byte arrays by default. Python 3 decodes this to
# unicode strings by default.
# The SWIG docs mention that they always decode byte streams
# as utf-8 strings for Python 3, with the `surrogateescape`
# error handler configured.
# Therefore, we can encode them back to their original bytearray
# representation on Python 3 by using the same parameters.
pixel_bytearray = gdcm_image.GetBuffer()
if sys.version_info >= (3, 0):
pixel_bytearray = pixel_bytearray.encode("utf-8", "surrogateescape")
# if GDCM indicates that a byte swap is in order, make sure to inform numpy as well
if gdcm_image.GetNeedByteSwap():
numpy_dtype = numpy_dtype.newbyteorder('S')
# Here we need to be careful because in some cases, GDCM reads a
# buffer that is too large, so we need to make sure we only include
# the first n_rows * n_columns * dtype_size bytes.
n_bytes = self.Rows * self.Columns * numpy.dtype(numpy_dtype).itemsize
if len(pixel_bytearray) > n_bytes:
# We make sure that all the bytes after are in fact zeros
padding = pixel_bytearray[n_bytes:]
if numpy.any(numpy.fromstring(padding, numpy.byte)):
pixel_bytearray = pixel_bytearray[:n_bytes]
else:
# We revert to the old behavior which should then result in a
# Numpy error later on.
pass
pixel_array = numpy.fromstring(pixel_bytearray, dtype=numpy_dtype)
# Note the following reshape operations return a new *view* onto pixel_array, but don't copy the data
if 'NumberOfFrames' in self and self.NumberOfFrames > 1:
if self.SamplesPerPixel > 1:
# TODO: Handle Planar Configuration attribute
assert self.PlanarConfiguration == 0
pixel_array = pixel_array.reshape(self.NumberOfFrames, self.Rows, self.Columns, self.SamplesPerPixel)
else:
pixel_array = pixel_array.reshape(self.NumberOfFrames, self.Rows, self.Columns)
else:
if self.SamplesPerPixel > 1:
if self.BitsAllocated == 8:
if self.PlanarConfiguration == 0:
pixel_array = pixel_array.reshape(self.Rows, self.Columns, self.SamplesPerPixel)
else:
pixel_array = pixel_array.reshape(self.SamplesPerPixel, self.Rows, self.Columns)
pixel_array = pixel_array.transpose(1, 2, 0)
else:
raise NotImplementedError("This code only handles SamplesPerPixel > 1 if Bits Allocated = 8")
else:
pixel_array = pixel_array.reshape(self.Rows, self.Columns)
return pixel_array
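    # Worked example of the dtype construction above (illustrative values):
    # PixelRepresentation=0 with BitsAllocated=16 yields format_str 'uint16',
    # while PixelRepresentation=1 yields 'int16'.  When the dataset's
    # endianness differs from the host's, newbyteorder('S') swaps the dtype's
    # byte order before the raw buffer is interpreted.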
def _compressed_pixel_data_numpy(self):
"""Return a NumPy array of the pixel data.
NumPy is a numerical package for python. It is used if available.
:raises TypeError: if no pixel data in this dataset.
:raises ImportError: if cannot import numpy.
"""
if 'PixelData' not in self:
raise TypeError("No pixel data found in this dataset.")
if not have_numpy:
msg = "The Numpy package is required to use pixel_array, and numpy could not be imported."
raise ImportError(msg)
# determine the type used for the array
need_byteswap = (self.is_little_endian != sys_is_little_endian)
# Make NumPy format code, e.g. "uint16", "int32" etc
# from two pieces of info:
# self.PixelRepresentation -- 0 for unsigned, 1 for signed;
# self.BitsAllocated -- 8, 16, or 32
format_str = '%sint%d' % (('u', '')[self.PixelRepresentation],
self.BitsAllocated)
try:
numpy_format = numpy.dtype(format_str)
except TypeError:
msg = ("Data type not understood by NumPy: "
"format='%s', PixelRepresentation=%d, BitsAllocated=%d")
raise TypeError(msg % (numpy_format, self.PixelRepresentation,
self.BitsAllocated))
if self.file_meta.TransferSyntaxUID in pydicom.uid.PILSupportedCompressedPixelTransferSyntaxes:
UncompressedPixelData = self._get_PIL_supported_compressed_pixeldata()
elif self.file_meta.TransferSyntaxUID in pydicom.uid.JPEGLSSupportedCompressedPixelTransferSyntaxes:
UncompressedPixelData = self._get_jpeg_ls_supported_compressed_pixeldata()
else:
msg = "The transfer syntax {0} is not currently supported.".format(self.file_meta.TransferSyntaxUID)
raise NotImplementedError(msg)
# Have correct Numpy format, so create the NumPy array
arr = numpy.fromstring(UncompressedPixelData, numpy_format)
# XXX byte swap - may later handle this in read_file!!?
if need_byteswap:
arr.byteswap(True) # True means swap in-place, don't make a new copy
# Note the following reshape operations return a new *view* onto arr, but don't copy the data
if 'NumberOfFrames' in self and self.NumberOfFrames > 1:
if self.SamplesPerPixel > 1:
arr = arr.reshape(self.NumberOfFrames, self.Rows, self.Columns, self.SamplesPerPixel)
else:
arr = arr.reshape(self.NumberOfFrames, self.Rows, self.Columns)
else:
if self.SamplesPerPixel > 1:
if self.BitsAllocated == 8:
if self.PlanarConfiguration == 0:
arr = arr.reshape(self.Rows, self.Columns, self.SamplesPerPixel)
else:
arr = arr.reshape(self.SamplesPerPixel, self.Rows, self.Columns)
arr = arr.transpose(1, 2, 0)
else:
raise NotImplementedError("This code only handles SamplesPerPixel > 1 if Bits Allocated = 8")
else:
arr = arr.reshape(self.Rows, self.Columns)
if (self.file_meta.TransferSyntaxUID in pydicom.uid.JPEG2000CompressedPixelTransferSyntaxes and self.BitsStored == 16):
            # Empirical fix: mask off the top bit of 16-bit JPEG 2000 samples,
            # which some decoders appear to set spuriously (the reason was
            # unclear to the original author)
arr &= 0x7FFF
return arr
def _get_PIL_supported_compressed_pixeldata(self):
if not have_pillow:
msg = "The pillow package is required to use pixel_array for this transfer syntax {0}, and pillow could not be imported.".format(self.file_meta.TransferSyntaxUID)
raise ImportError(msg)
# decompress here
if self.file_meta.TransferSyntaxUID in pydicom.uid.JPEGLossyCompressedPixelTransferSyntaxes:
if self.BitsAllocated > 8:
raise NotImplementedError("JPEG Lossy only supported if Bits Allocated = 8")
generic_jpeg_file_header = b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x01\x00\x01\x00\x01\x00\x00'
frame_start_from = 2
elif self.file_meta.TransferSyntaxUID in pydicom.uid.JPEG2000CompressedPixelTransferSyntaxes:
generic_jpeg_file_header = b''
# generic_jpeg_file_header = b'\x00\x00\x00\x0C\x6A\x50\x20\x20\x0D\x0A\x87\x0A'
frame_start_from = 0
else:
generic_jpeg_file_header = b''
frame_start_from = 0
try:
            UncompressedPixelData = b''  # bytes, since tobytes() output is concatenated below
if 'NumberOfFrames' in self and self.NumberOfFrames > 1:
# multiple compressed frames
CompressedPixelDataSeq = pydicom.encaps.decode_data_sequence(self.PixelData)
for frame in CompressedPixelDataSeq:
data = generic_jpeg_file_header + frame[frame_start_from:]
fio = io.BytesIO(data)
try:
decompressed_image = PILImg.open(fio)
                    except IOError as e:
                        raise NotImplementedError(str(e))
UncompressedPixelData += decompressed_image.tobytes()
else:
# single compressed frame
UncompressedPixelData = pydicom.encaps.defragment_data(self.PixelData)
UncompressedPixelData = generic_jpeg_file_header + UncompressedPixelData[frame_start_from:]
try:
fio = io.BytesIO(UncompressedPixelData)
decompressed_image = PILImg.open(fio)
                except IOError as e:
                    raise NotImplementedError(str(e))
UncompressedPixelData = decompressed_image.tobytes()
        except Exception:
            # nothing to clean up yet; re-raise unchanged
            raise
return UncompressedPixelData
def _get_jpeg_ls_supported_compressed_pixeldata(self):
if not have_jpeg_ls:
msg = "The jpeg_ls package is required to use pixel_array for this transfer syntax {0}, and jpeg_ls could not be imported.".format(self.file_meta.TransferSyntaxUID)
raise ImportError(msg)
# decompress here
        UncompressedPixelData = b''  # bytes, since tobytes() output is appended below
if 'NumberOfFrames' in self and self.NumberOfFrames > 1:
# multiple compressed frames
CompressedPixelDataSeq = pydicom.encaps.decode_data_sequence(self.PixelData)
for frame in CompressedPixelDataSeq:
decompressed_image = jpeg_ls.decode(numpy.fromstring(frame, dtype=numpy.uint8))
UncompressedPixelData += decompressed_image.tobytes()
else:
# single compressed frame
CompressedPixelData = pydicom.encaps.defragment_data(self.PixelData)
decompressed_image = jpeg_ls.decode(numpy.fromstring(CompressedPixelData, dtype=numpy.uint8))
UncompressedPixelData = decompressed_image.tobytes()
return UncompressedPixelData
    # Used by the pixel_array property
def _get_pixel_array(self):
# Check if already have converted to a NumPy array
# Also check if self.PixelData has changed. If so, get new NumPy array
already_have = True
if not hasattr(self, "_pixel_array"):
already_have = False
elif self._pixel_id != id(self.PixelData):
already_have = False
if not already_have and not self._is_uncompressed_transfer_syntax():
try:
# print("Pixel Data is compressed")
self._pixel_array = self._compressed_pixel_data_numpy()
self._pixel_id = id(self.PixelData) # is this guaranteed to work if memory is re-used??
return self._pixel_array
            except IOError:
                logger.info("Pillow or jpeg_ls did not support this transfer syntax; trying the uncompressed/GDCM path")
if not already_have:
self._pixel_array = self._pixel_data_numpy()
self._pixel_id = id(self.PixelData) # is this guaranteed to work if memory is re-used??
return self._pixel_array
@property
def pixel_array(self):
"""Return the pixel data as a NumPy array"""
try:
return self._get_pixel_array()
except AttributeError:
t, e, tb = sys.exc_info()
val = PropertyError("AttributeError in pixel_array property: " +
e.args[0])
compat.reraise(PropertyError, val, tb)
# Format strings spec'd according to python string formatting options
# See http://docs.python.org/library/stdtypes.html#string-formatting-operations
default_element_format = "%(tag)s %(name)-35.35s %(VR)s: %(repval)s"
default_sequence_element_format = "%(tag)s %(name)-35.35s %(VR)s: %(repval)s"
def formatted_lines(self, element_format=default_element_format,
sequence_element_format=default_sequence_element_format,
indent_format=None):
"""A generator to give back a formatted string representing each line
one at a time. Example:
for line in dataset.formatted_lines("%(name)s=%(repval)s", "SQ:%(name)s=%(repval)s"):
print(line)
See the source code for default values which illustrate some of the names that can be used in the
format strings
indent_format -- not used in current version. Placeholder for future functionality.
"""
for data_element in self.iterall():
# Get all the attributes possible for this data element (e.g.
# gets descriptive text name too)
# This is the dictionary of names that can be used in the format string
elem_dict = dict([(x, getattr(data_element, x)()
if callable(getattr(data_element, x))
else getattr(data_element, x))
for x in dir(data_element) if not x.startswith("_")])
if data_element.VR == "SQ":
yield sequence_element_format % elem_dict
else:
yield element_format % elem_dict
def _pretty_str(self, indent=0, top_level_only=False):
"""Return a string of the data_elements in this dataset, with indented levels.
This private method is called by the __str__() method
for handling print statements or str(dataset), and the __repr__() method.
It is also used by top(), which is the reason for the top_level_only flag.
This function recurses, with increasing indentation levels.
"""
strings = []
indent_str = self.indent_chars * indent
nextindent_str = self.indent_chars * (indent + 1)
for data_element in self:
with tag_in_exception(data_element.tag):
if data_element.VR == "SQ": # a sequence
strings.append(indent_str + str(data_element.tag) + " %s %i item(s) ---- " % (data_element.description(), len(data_element.value)))
if not top_level_only:
for dataset in data_element.value:
strings.append(dataset._pretty_str(indent + 1))
strings.append(nextindent_str + "---------")
else:
strings.append(indent_str + repr(data_element))
return "\n".join(strings)
def remove_private_tags(self):
"""Remove all Dicom private tags in this dataset and those contained within."""
def RemoveCallback(dataset, data_element):
"""Internal method to use as callback to walk() method."""
if data_element.tag.is_private:
# can't del self[tag] - won't be right dataset on recursion
del dataset[data_element.tag]
self.walk(RemoveCallback)
def save_as(self, filename, write_like_original=True):
"""Write the dataset to a file.
Parameters
----------
filename : str
Name of file to save new DICOM file to.
write_like_original : boolean
If True (default), preserves the following information from
the dataset:
        -preamble -- if no preamble in read file, then not used here
-hasFileMeta -- if writer did not do file meta information,
then don't write here either
-seq.is_undefined_length -- if original had delimiters, write them now too,
instead of the more sensible length characters
- is_undefined_length_sequence_item -- for datasets that belong to a
sequence, write the undefined length delimiters if that is
what the original had.
If False, produces a "nicer" DICOM file for other readers,
where all lengths are explicit.
See Also
--------
pydicom.filewriter.write_file
Write a DICOM file from a FileDataset instance.
Notes
-----
Set dataset.preamble if you want something other than 128 0-bytes.
If the dataset was read from an existing dicom file, then its preamble
was stored at read time. It is up to the user to ensure the preamble is still
correct for its purposes.
If there is no Transfer Syntax tag in the dataset, then set
dataset.is_implicit_VR and dataset.is_little_endian
to determine the transfer syntax used to write the file.
"""
pydicom.write_file(filename, self, write_like_original)
def __setattr__(self, name, value):
"""Intercept any attempts to set a value for an instance attribute.
If name is a dicom descriptive string (cleaned with CleanName),
then set the corresponding tag and data_element.
Else, set an instance (python) attribute as any other class would do.
"""
tag = tag_for_name(name)
if tag is not None: # successfully mapped name to a tag
if tag not in self: # don't have this tag yet->create the data_element instance
VR = dictionaryVR(tag)
data_element = DataElement(tag, VR, value)
else: # already have this data_element, just changing its value
data_element = self[tag]
data_element.value = value
# Now have data_element - store it in this dict
self[tag] = data_element
else: # name not in dicom dictionary - setting a non-dicom instance attribute
# XXX note if user mis-spells a dicom data_element - no error!!!
self.__dict__[name] = value
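    # Illustrative example (hypothetical values): assignment by DICOM keyword
    # creates or updates the matching element, anything else becomes a plain
    # python attribute:
    #     ds.PatientName = 'Doe^John'   # creates/updates tag (0010,0010)
    #     ds.my_note = 'not DICOM'      # ordinary instance attribute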
def __setitem__(self, key, value):
"""Operator for dataset[key]=value. Check consistency, and deal with private tags"""
if not isinstance(value, (DataElement, RawDataElement)): # ok if is subclass, e.g. DeferredDataElement
raise TypeError("Dataset contents must be DataElement instances.\n"
"To set a data_element value use data_element.value=val")
tag = Tag(value.tag)
if key != tag:
raise ValueError("data_element.tag must match the dictionary key")
data_element = value
if tag.is_private:
# See PS 3.5-2008 section 7.8.1 (p. 44) for how blocks are reserved
logger.debug("Setting private tag %r" % tag)
private_block = tag.elem >> 8
private_creator_tag = Tag(tag.group, private_block)
if private_creator_tag in self and tag != private_creator_tag:
if isinstance(data_element, RawDataElement):
data_element = DataElement_from_raw(data_element, self._character_set)
data_element.private_creator = self[private_creator_tag].value
dict.__setitem__(self, tag, data_element)
def __str__(self):
"""Handle str(dataset)."""
return self._pretty_str()
def top(self):
"""Show the DICOM tags, but only the top level; do not recurse into Sequences"""
return self._pretty_str(top_level_only=True)
def trait_names(self):
"""Return a list of valid names for auto-completion code
Used in IPython, so that data element names can be found
and offered for autocompletion on the IPython command line
"""
return dir(self) # only valid python >=2.6, else use self.__dir__()
def update(self, dictionary):
"""Extend dict.update() to handle DICOM keywords."""
for key, value in list(dictionary.items()):
if isinstance(key, (str, compat.text_type)):
setattr(self, key, value)
else:
self[Tag(key)] = value
def iterall(self):
"""Iterate through the dataset, yielding all data elements.
Unlike Dataset.__iter__, this *does* recurse into sequences,
and so returns all data elements as if the file were "flattened".
"""
for data_element in self:
yield data_element
if data_element.VR == "SQ":
sequence = data_element.value
for dataset in sequence:
for elem in dataset.iterall():
yield elem
def walk(self, callback, recursive=True):
"""Walk over given function for all dataset data_elements.
Visit all data_elements, possibly recursing into sequences and their datasets,
The callback function is called for each data_element
(including SQ element).
Can be used to perform an operation on certain types of data_elements.
E.g., `remove_private_tags`() finds all private tags and deletes them.
`DataElement`s will come back in DICOM order (by increasing tag number
within their dataset)
Parameters
----------
callback: a callable that takes two arguments: a dataset, and
a data_element belonging to that dataset.
recursive : boolean
Flag to indicate whether to recurse into Sequences
"""
taglist = sorted(self.keys())
for tag in taglist:
with tag_in_exception(tag):
data_element = self[tag]
callback(self, data_element) # self = this Dataset
# 'tag in self' below needed in case callback deleted data_element
if recursive and tag in self and data_element.VR == "SQ":
sequence = data_element.value
for dataset in sequence:
dataset.walk(callback)
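    # Illustrative callback (hedged -- it mirrors the pattern used by
    # remove_private_tags above): print every element, recursing into
    # sequences:
    #     def print_callback(dataset, data_element):
    #         print(data_element)
    #     ds.walk(print_callback)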
__repr__ = __str__
class FileDataset(Dataset):
def __init__(self, filename_or_obj, dataset, preamble=None, file_meta=None,
is_implicit_VR=True, is_little_endian=True):
"""Initialize a dataset read from a DICOM file.
Parameters
----------
filename_or_obj : str, None
Full path and filename to the file. Use None if is a BytesIO.
dataset : Dataset, dict
Some form of dictionary, usually a Dataset from read_dataset()
preamble : None, optional
The 128-byte DICOM preamble
file_meta : None, optional
The file meta info dataset, as returned by _read_file_meta,
or an empty dataset if no file meta information is in the file.
is_implicit_VR : boolean, optional
True (default) if implicit VR transfer syntax used; False if explicit VR.
is_little_endian : boolean
True (default) if little-endian transfer syntax used; False if big-endian.
"""
Dataset.__init__(self, dataset)
self.preamble = preamble
self.file_meta = file_meta
self.is_implicit_VR = is_implicit_VR
self.is_little_endian = is_little_endian
if isinstance(filename_or_obj, compat.string_types):
self.filename = filename_or_obj
self.fileobj_type = open
elif isinstance(filename_or_obj, io.BufferedReader):
self.filename = filename_or_obj.name
# This is the appropriate constructor for io.BufferedReader
self.fileobj_type = open
else:
self.fileobj_type = filename_or_obj.__class__ # use __class__ python <2.7?; http://docs.python.org/reference/datamodel.html
if getattr(filename_or_obj, "name", False):
self.filename = filename_or_obj.name
elif getattr(filename_or_obj, "filename", False): # gzip python <2.7?
self.filename = filename_or_obj.filename
else:
self.filename = None # e.g. came from BytesIO or something file-like
self.timestamp = None
if stat_available and self.filename and os.path.exists(self.filename):
statinfo = os.stat(self.filename)
self.timestamp = statinfo.st_mtime
|
mshunshin/SegNetCMR
|
pydicom/dataset.py
|
Python
|
mit
| 42,434
|
[
"VisIt"
] |
1e3fe5d58b2cfa28f62ba39115e396cc6601df03d4d0a58055830eaccd5fd22f
|
"""
Example ThirdWay experimentations.
"""
from __future__ import absolute_import, print_function
import matplotlib.pyplot as plt
import numpy as np
from thirdway.lightcurve import LightCurve, generate_lc_depth, kepler17_params_db
from thirdway.fitting import peak_finder, summed_gaussians, gaussian
from astropy.utils.console import ProgressBar
# Load light curve from jrad's text archive
light_curve_path = 'data/kepler17_whole.dat'
BJDREF = 2454833.
depth = 0.13413993**2
jd_minus_bjdref, flux, error = np.loadtxt(light_curve_path, unpack=True)
jd = jd_minus_bjdref + BJDREF
kepler17_params = kepler17_params_db()
# construct light curve object from those data
whole_lc = LightCurve(times=jd, fluxes=flux, errors=error)
transits = LightCurve(**whole_lc.mask_out_of_transit(kepler17_params)
).get_transit_light_curves(kepler17_params)
# The short cadence data begin after the 137th transit, so ignore all transits before then:
transits = transits[137:]
plots = True
delta_chi2 = {}  # maps transit index -> list of delta chi^2 values, one per fitted bump
with ProgressBar(len(transits)) as bar:
for i, lc in enumerate(transits):
#lc.plot()
# Remove linear out-of-transit trend from transit
lc.remove_linear_baseline(kepler17_params)
residuals = lc.fluxes - generate_lc_depth(lc.times_jd, depth, kepler17_params)
best_fit_params = peak_finder(lc.times.jd, residuals, lc.errors,
kepler17_params)
transit_model = generate_lc_depth(lc.times_jd, depth, kepler17_params)
chi2_transit = np.sum((lc.fluxes - transit_model)**2/lc.errors**2)/len(lc.fluxes)
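        # chi2_transit is the chi^2 of the fixed-depth transit model divided
        # by the number of points, i.e. a reduced chi^2 using the raw point
        # count rather than N minus the number of fitted parameters.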
gaussian_model = summed_gaussians(lc.times.jd, best_fit_params)
if best_fit_params is not None:
            split_input_parameters = np.split(np.array(best_fit_params), len(best_fit_params)//3)  # integer division for Python 3
delta_chi2[i] = []
for amplitude, t0, sigma in split_input_parameters:
model_i = gaussian(lc.times.jd, amplitude, t0, sigma)
chi2_bumps = np.sum((lc.fluxes - transit_model - model_i)**2/lc.errors**2)/len(lc.fluxes)
delta_chi2[i].append(np.abs(chi2_transit - chi2_bumps))
if plots:
fig, ax = plt.subplots(3, 1, figsize=(8, 14), sharex=True)
ax[0].errorbar(lc.times.jd, lc.fluxes, lc.errors, fmt='.', color='k')
ax[0].plot(lc.times.jd, transit_model, 'r')
ax[0].set(ylabel='Flux')
ax[1].axhline(0, color='gray', ls='--')
ax[1].errorbar(lc.times.jd, lc.fluxes - transit_model, fmt='.', color='k')
ax[1].plot(lc.times.jd, gaussian_model, color='r')
ax[1].set_ylabel('Transit Residuals')
ax[2].axhline(0, color='gray', ls='--')
ax[2].errorbar(lc.times.jd, lc.fluxes - transit_model - gaussian_model, fmt='.', color='k')
ax[2].set_ylabel('Gaussian Residuals')
            ax[2].set_title(r'$\Delta \chi^2$ = ' + '{0}'.format(delta_chi2[i]))
fig.tight_layout()
fig.savefig('plots/{0:03d}.png'.format(i), bbox_inches='tight')
#plt.show()
plt.close()
bar.update()
print(list(delta_chi2.values()))
all_delta_chi2 = np.concatenate(list(delta_chi2.values())).ravel()
fig, ax = plt.subplots(1,figsize=(12, 6))
ax.plot(np.log10(all_delta_chi2), '.')
plt.show()
|
jradavenport/ThirdWay
|
example_k17_bfgs.py
|
Python
|
mit
| 3,447
|
[
"Gaussian"
] |
47cffe1ce31d842184f0817260c8f08059adb3d9d7c7d200e864cf5cc0079ff7
|
"""Contains the classes that deal with constant temperature dynamics.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contains the algorithms which propagate the thermostatting steps in the constant
temperature ensembles. Includes the new GLE thermostat, which can be used to
run PI+GLE dynamics, reducing the number of path integral beads required.
Classes:
Thermostat: Base thermostat class with the generic methods and attributes.
ThermoLangevin: Holds the algorithms for a langevin thermostat.
ThermoPILE_L: Holds the algorithms for a path-integral langevin equation
thermostat, with a thermostat coupled directly to the
centroid coordinate of each bead.
ThermoPILE_G: Holds the algorithms for a path-integral langevin equation
thermostat, with a thermostat coupled to the kinetic energy for
the entire system.
ThermoSVR: Holds the algorithms for a stochastic velocity rescaling
thermostat.
ThermoGLE: Holds the algorithms for a generalised langevin equation
thermostat.
ThermoNMGLE: Holds the algorithms for a generalised langevin equation
thermostat in the normal mode representation.
ThermoNMGLEG: Holds the algorithms for a generalised langevin equation
thermostat in the normal mode representation, with kinetic energy as
well as potential energy sampling optimization.
"""
__all__ = ['Thermostat', 'ThermoLangevin', 'ThermoPILE_L', 'ThermoPILE_G',
'ThermoSVR', 'ThermoGLE', 'ThermoNMGLE', 'ThermoNMGLEG']
import numpy as np
from ipi.utils.depend import *
from ipi.utils.units import *
from ipi.utils.mathtools import matrix_exp, stab_cholesky, root_herm
from ipi.utils.prng import Random
from ipi.utils.messages import verbosity, warning, info
from ipi.engine.beads import Beads
from ipi.engine.normalmodes import NormalModes
class Thermostat(dobject):
"""Base thermostat class.
Gives the standard methods and attributes needed in all the thermostat
classes.
Attributes:
prng: A pseudo random number generator object.
ndof: The number of degrees of freedom that the thermostat will be
attached to.
Depend objects:
dt: The time step used in the algorithms. Depends on the simulation dt.
temp: The simulation temperature. Higher than the system temperature by
a factor of the number of beads. Depends on the simulation temp.
ethermo: The total energy exchanged with the bath due to the thermostat.
p: The momentum vector that the thermostat is coupled to. Depends on the
beads p object.
m: The mass vector associated with p. Depends on the beads m object.
sm: The square root of the mass vector.
"""
def __init__(self, temp = 1.0, dt = 1.0, ethermo=0.0):
"""Initialises Thermostat.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
ethermo: The initial heat energy transferred to the bath.
Defaults to 0.0. Will be non-zero if the thermostat is
initialised from a checkpoint file.
"""
dset(self,"temp", depend_value(name='temp', value=temp))
dset(self,"dt", depend_value(name='dt', value=dt))
dset(self,"ethermo",depend_value(name='ethermo',value=ethermo))
def bind(self, beads=None, atoms=None, pm=None, prng=None, fixdof=None):
"""Binds the appropriate degrees of freedom to the thermostat.
This takes an object with degrees of freedom, and makes their momentum
and mass vectors members of the thermostat. It also then creates the
objects that will hold the data needed in the thermostat algorithms
and the dependency network.
Args:
beads: An optional beads object to take the mass and momentum vectors
from.
atoms: An optional atoms object to take the mass and momentum vectors
from.
pm: An optional tuple containing a single momentum value and its
conjugate mass.
prng: An optional pseudo random number generator object. Defaults to
Random().
fixdof: An optional integer which can specify the number of constraints
applied to the system. Defaults to zero.
Raises:
TypeError: Raised if no appropriate degree of freedom or object
containing a momentum vector is specified for
the thermostat to couple to.
"""
if prng is None:
warning("Initializing thermostat from standard random PRNG", verbosity.medium)
self.prng = Random()
else:
self.prng = prng
if not beads is None:
dset(self,"p",beads.p.flatten())
dset(self,"m",beads.m3.flatten())
elif not atoms is None:
dset(self,"p",dget(atoms, "p"))
dset(self,"m",dget(atoms, "m3"))
elif not pm is None:
dset(self,"p",pm[0])
dset(self,"m",pm[1])
else:
raise TypeError("Thermostat.bind expects either Beads, Atoms, NormalModes, or a (p,m) tuple to bind to")
if fixdof is None:
self.ndof = len(self.p)
else:
self.ndof = float(len(self.p) - fixdof)
dset(self, "sm",
depend_array(name="sm", value=np.zeros(len(dget(self,"m"))),
func=self.get_sm, dependencies=[dget(self,"m")]))
def get_sm(self):
"""Retrieves the square root of the mass matrix.
Returns:
A vector of the square root of the mass matrix with one value for
each degree of freedom.
"""
return np.sqrt(self.m)
def step(self):
"""Dummy thermostat step."""
pass
class ThermoLangevin(Thermostat):
"""Represents a langevin thermostat.
Depend objects:
tau: Thermostat damping time scale. Larger values give a less strongly
coupled thermostat.
T: Coefficient of the diffusive contribution of the thermostat, i.e. the
drift back towards equilibrium. Depends on tau and the time step.
S: Coefficient of the stochastic contribution of the thermostat, i.e.
the uncorrelated Gaussian noise. Depends on T and the temperature.
"""
def get_T(self):
"""Calculates the coefficient of the overall drift of the velocities."""
return np.exp(-0.5*self.dt/self.tau)
def get_S(self):
"""Calculates the coefficient of the white noise."""
return np.sqrt(Constants.kb*self.temp*(1 - self.T**2))
def __init__(self, temp = 1.0, dt = 1.0, tau = 1.0, ethermo=0.0):
"""Initialises ThermoLangevin.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
tau: The thermostat damping timescale. Defaults to 1.0.
ethermo: The initial heat energy transferred to the bath.
Defaults to 0.0. Will be non-zero if the thermostat is
initialised from a checkpoint file.
"""
super(ThermoLangevin,self).__init__(temp, dt, ethermo)
dset(self,"tau",depend_value(value=tau,name='tau'))
dset(self,"T",
depend_value(name="T",func=self.get_T,
dependencies=[dget(self,"tau"), dget(self,"dt")]))
dset(self,"S",
depend_value(name="S",func=self.get_S,
dependencies=[dget(self,"temp"), dget(self,"T")]))
def step(self):
"""Updates the bound momentum vector with a langevin thermostat."""
p = depstrip(self.p).copy()
sm = depstrip(self.sm)
p /= sm
self.ethermo += np.dot(p,p)*0.5
p *= self.T
p += self.S*self.prng.gvec(len(p))
self.ethermo -= np.dot(p,p)*0.5
p *= sm
self.p = p
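   # The step above is the exact Ornstein-Uhlenbeck update applied to the
   # mass-scaled momenta p/sqrt(m):
   #     p' = T*p + S*xi,  T = exp(-dt/(2*tau)),  S = sqrt(kb*temp*(1 - T**2)),
   # with xi a vector of unit Gaussian random numbers, so the Maxwell
   # distribution at 'temp' is the stationary state; ethermo accumulates the
   # kinetic-energy change as heat exchanged with the bath.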
class ThermoPILE_L(Thermostat):
"""Represents a PILE thermostat with a local centroid thermostat.
Attributes:
_thermos: The list of the different thermostats for all the ring polymer
normal modes.
nm: A normal modes object to attach the thermostat to.
prng: Random number generator used in the stochastic integration
algorithms.
Depend objects:
tau: Centroid thermostat damping time scale. Larger values give a
less strongly coupled centroid thermostat.
tauk: Thermostat damping time scale for the non-centroid normal modes.
Depends on the ring polymer spring constant, and thus the simulation
temperature.
pilescale: A float used to reduce the intensity of the PILE thermostat if
required.
"""
def __init__(self, temp = 1.0, dt = 1.0, tau = 1.0, ethermo=0.0, scale=1.0):
"""Initialises ThermoPILE_L.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
tau: The centroid thermostat damping timescale. Defaults to 1.0.
ethermo: The initial conserved energy quantity. Defaults to 0.0. Will
be non-zero if the thermostat is initialised from a checkpoint file.
scale: A float used to reduce the intensity of the PILE thermostat if
required.
Raises:
TypeError: Raised if the thermostat is used with any object other than
a beads object, so that we make sure that the objects needed for the
normal mode transformation exist.
"""
super(ThermoPILE_L,self).__init__(temp,dt,ethermo)
dset(self,"tau",depend_value(value=tau,name='tau'))
dset(self,"pilescale",depend_value(value=scale,name='pilescale'))
def bind(self, nm=None, prng=None, bindcentroid=True, fixdof=None):
"""Binds the appropriate degrees of freedom to the thermostat.
This takes a beads object with degrees of freedom, and makes its momentum
and mass vectors members of the thermostat. It also then creates the
objects that will hold the data needed in the thermostat algorithms
and the dependency network.
Gives the interface for both the PILE_L and PILE_G thermostats, which
only differ in their treatment of the centroid coordinate momenta.
Args:
nm: An optional normal mode object to take the mass and momentum
vectors from.
prng: An optional pseudo random number generator object. Defaults to
Random().
bindcentroid: An optional boolean which decides whether a Langevin
thermostat is attached to the centroid mode of each atom
separately, or the total kinetic energy. Defaults to True, which
gives a thermostat bound to each centroid momentum.
fixdof: An optional integer which can specify the number of constraints
applied to the system. Defaults to zero.
Raises:
TypeError: Raised if no appropriate degree of freedom or object
containing a momentum vector is specified for
the thermostat to couple to.
"""
      if nm is None or type(nm) is not NormalModes:
raise TypeError("ThermoPILE_L.bind expects a NormalModes argument to bind to")
if prng is None:
self.prng = Random()
else:
self.prng = prng
prev_ethermo = self.ethermo
# creates a set of thermostats to be applied to individual normal modes
self._thermos = [ ThermoLangevin(temp=1, dt=1, tau=1) for b in range(nm.nbeads) ]
# optionally does not bind the centroid, so we can re-use all of this
# in the PILE_G case
if not bindcentroid:
self._thermos[0] = None
self.nm = nm
dset(self,"tauk",
depend_array(name="tauk", value=np.zeros(nm.nbeads-1,float),
func=self.get_tauk, dependencies=[dget(self,"pilescale"), dget(nm,"dynomegak")] ) )
# must pipe all the dependencies in such a way that values for the nm thermostats
# are automatically updated based on the "master" thermostat
def make_taugetter(k):
return lambda: self.tauk[k-1]
it = 0
for t in self._thermos:
if t is None:
it += 1
continue
if it > 0:
fixdof = None # only the centroid thermostat may have constraints
# bind thermostat t to the it-th bead
t.bind(pm=(nm.pnm[it,:],nm.dynm3[it,:]),prng=self.prng, fixdof=fixdof)
# pipes temp and dt
deppipe(self,"temp", t, "temp")
deppipe(self,"dt", t, "dt")
# for tau it is slightly more complex
if it == 0:
deppipe(self,"tau", t, "tau")
else:
# Here we manually connect _thermos[i].tau to tauk[i].
# Simple and clear.
dget(t,"tau").add_dependency(dget(self,"tauk"))
dget(t,"tau")._func = make_taugetter(it)
dget(self,"ethermo").add_dependency(dget(t,"ethermo"))
it += 1
# since the ethermo will be "delegated" to the normal modes thermostats,
# one has to split
# any previously-stored value between the sub-thermostats
if bindcentroid:
for t in self._thermos:
t.ethermo = prev_ethermo/nm.nbeads
dget(self,"ethermo")._func = self.get_ethermo;
# if we are not binding the centroid just yet, this bit of the piping
# is delegated to the function which is actually calling this
def get_tauk(self):
"""Computes the thermostat damping time scale for the non-centroid
normal modes.
Returns:
An array with the damping time scales for the non-centroid modes.
"""
# Also include an optional scaling factor to reduce the intensity of NM thermostats
return np.array([ self.pilescale/(2*self.nm.dynomegak[k]) for k in range(1,len(self._thermos)) ])
def get_ethermo(self):
"""Computes the total energy transferred to the heat bath for all the
thermostats.
"""
      et = 0.0
for t in self._thermos:
et += t.ethermo
return et
def step(self):
"""Updates the bound momentum vector with a PILE thermostat."""
      # simply delegate the update to the per-mode thermostats
for t in self._thermos:
t.step()
class ThermoSVR(Thermostat):
"""Represents a stochastic velocity rescaling thermostat.
Depend objects:
tau: Centroid thermostat damping time scale. Larger values give a
less strongly coupled centroid thermostat.
K: Scaling factor for the total kinetic energy. Depends on the
temperature.
et: Parameter determining the strength of the thermostat coupling.
Depends on tau and the time step.
"""
def get_et(self):
"""Calculates the damping term in the propagator."""
return np.exp(-0.5*self.dt/self.tau)
def get_K(self):
"""Calculates the average kinetic energy per degree of freedom."""
return Constants.kb*self.temp*0.5
def __init__(self, temp = 1.0, dt = 1.0, tau = 1.0, ethermo=0.0):
"""Initialises ThermoSVR.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
tau: The thermostat damping timescale. Defaults to 1.0.
ethermo: The initial conserved energy quantity. Defaults to 0.0. Will
be non-zero if the thermostat is initialised from a checkpoint file.
"""
super(ThermoSVR,self).__init__(temp,dt,ethermo)
dset(self,"tau",depend_value(value=tau,name='tau'))
dset(self,"et",
depend_value(name="et",func=self.get_et,
dependencies=[dget(self,"tau"), dget(self,"dt")]))
dset(self,"K",
depend_value(name="K",func=self.get_K, dependencies=[dget(self,"temp")]))
def step(self):
"""Updates the bound momentum vector with a stochastic velocity rescaling
thermostat. See G Bussi, D Donadio, M Parrinello,
Journal of Chemical Physics 126, 014101 (2007)
"""
K = np.dot(depstrip(self.p),depstrip(self.p)/depstrip(self.m))*0.5
      # rescaling is undefined if the kinetic energy is zero
if K == 0.0:
return
# gets the stochastic term (basically a Gamma distribution for the kinetic energy)
r1 = self.prng.g
if (self.ndof-1)%2 == 0:
rg = 2.0*self.prng.gamma((self.ndof-1)/2)
else:
rg = 2.0*self.prng.gamma((self.ndof-2)/2) + self.prng.g**2
alpha2 = self.et + self.K/K*(1 - self.et)*(r1**2 + rg) + 2.0*r1*np.sqrt(self.K/K*self.et*(1 - self.et))
alpha = np.sqrt(alpha2)
if (r1 + np.sqrt(2*K/self.K*self.et/(1 - self.et))) < 0:
alpha *= -1
self.ethermo += K*(1 - alpha2)
self.p *= alpha
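   # For reference, the rescaling factor computed above follows Bussi et al.:
   #     alpha**2 = et + (Kbar/K)*(1 - et)*(r1**2 + rg)
   #                + 2*r1*sqrt((Kbar/K)*et*(1 - et))
   # where K is the instantaneous kinetic energy, Kbar = kb*temp/2 is the
   # target kinetic energy per degree of freedom, et = exp(-dt/(2*tau)),
   # r1 is a unit Gaussian number and rg collects the remaining ndof-1
   # squared Gaussian numbers (drawn via the gamma distribution).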
class ThermoPILE_G(ThermoPILE_L):
"""Represents a PILE thermostat with a global centroid thermostat.
Simply replaces the Langevin thermostat for the centroid normal mode with
a global velocity rescaling thermostat.
"""
def __init__(self, temp = 1.0, dt = 1.0, tau = 1.0, ethermo=0.0, scale = 1.0):
"""Initialises ThermoPILE_G.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
tau: The centroid thermostat damping timescale. Defaults to 1.0.
ethermo: The initial conserved energy quantity. Defaults to 0.0. Will
be non-zero if the thermostat is initialised from a checkpoint file.
scale: A float used to reduce the intensity of the PILE thermostat if
required.
"""
super(ThermoPILE_G,self).__init__(temp,dt,tau,ethermo)
dset(self,"pilescale",depend_value(value=scale,name='pilescale'))
def bind(self, nm=None, prng=None, fixdof=None):
"""Binds the appropriate degrees of freedom to the thermostat.
This takes a beads object with degrees of freedom, and makes its momentum
and mass vectors members of the thermostat. It also then creates the
objects that will hold the data needed in the thermostat algorithms
and the dependency network.
Uses the PILE_L bind interface, with bindcentroid set to false so we can
specify that thermostat separately, by binding a global
thermostat to the centroid mode.
Args:
beads: An optional beads object to take the mass and momentum vectors
from.
prng: An optional pseudo random number generator object. Defaults to
Random().
fixdof: An optional integer which can specify the number of constraints
applied to the system. Defaults to zero.
"""
# first binds as a local PILE, then substitutes the thermostat on the centroid
prev_ethermo = self.ethermo
super(ThermoPILE_G,self).bind(nm=nm,prng=prng,bindcentroid=False, fixdof=fixdof)
#centroid thermostat
self._thermos[0] = ThermoSVR(temp=1, dt=1, tau=1)
t = self._thermos[0]
t.bind(pm=(nm.pnm[0,:],nm.dynm3[0,:]),prng=self.prng, fixdof=fixdof)
deppipe(self,"temp", t, "temp")
deppipe(self,"dt", t, "dt")
deppipe(self,"tau", t, "tau")
dget(self,"ethermo").add_dependency(dget(t,"ethermo"))
# splits any previous ethermo between the thermostats, and finishes to bind ethermo to the sum function
for t in self._thermos:
t.ethermo = prev_ethermo/nm.nbeads
dget(self,"ethermo")._func = self.get_ethermo;
class ThermoGLE(Thermostat):
"""Represents a GLE thermostat.
This is similar to a langevin thermostat, in that it uses Gaussian random
numbers to simulate a heat bath acting on the system, but simulates a
non-Markovian system by using a Markovian formulation in an extended phase
space. This allows for a much greater degree of flexibility, and this
   thermostat, properly fitted, can give an approximation to the correct
quantum ensemble even for a classical, 1-bead simulation. More reasonably,
using this thermostat allows for a far smaller number of replicas of the
system to be used, as the convergence of the properties
of the system is accelerated with respect to number of beads when PI+GLE
   are used in combination. (See M. Ceriotti, D. E. Manolopoulos, M. Parrinello,
J. Chem. Phys. 134, 084104 (2011)).
Attributes:
      ns: The number of auxiliary degrees of freedom.
s: An array holding all the momenta, including the ones for the
         auxiliary degrees of freedom.
Depend objects:
A: Drift matrix giving the damping time scales for all the different
degrees of freedom.
C: Static covariance matrix.
Satisfies A.C + C.transpose(A) = B.transpose(B), where B is the
diffusion matrix, giving the strength of the coupling of the system
with the heat bath, and thus the size of the stochastic
contribution of the thermostat.
T: Matrix for the diffusive contribution of the thermostat, i.e. the
drift back towards equilibrium. Depends on A and the time step.
S: Matrix for the stochastic contribution of the thermostat, i.e.
the uncorrelated Gaussian noise. Depends on C and T.
"""
def get_T(self):
"""Calculates the matrix for the overall drift of the velocities."""
return matrix_exp(-0.5*self.dt*self.A)
def get_S(self):
"""Calculates the matrix for the coloured noise."""
SST = Constants.kb*(self.C - np.dot(self.T,np.dot(self.C,self.T.T)))
      # Uses a symmetric decomposition rather than Cholesky, since it is more stable
return root_herm(SST)
def get_C(self):
"""Calculates C from temp (if C is not set explicitly)"""
rC = np.identity(self.ns + 1,float)*self.temp
return rC[:]
def __init__(self, temp = 1.0, dt = 1.0, A = None, C = None, ethermo=0.0):
"""Initialises ThermoGLE.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
A: An optional matrix giving the drift matrix. Defaults to a single
value of 1.0.
C: An optional matrix giving the covariance matrix. Defaults to an
identity matrix times temperature with the same dimensions as the
total number of degrees of freedom in the system.
ethermo: The initial heat energy transferred to the bath.
Defaults to 0.0. Will be non-zero if the thermostat is
initialised from a checkpoint file.
"""
super(ThermoGLE,self).__init__(temp,dt,ethermo)
if A is None:
A = np.identity(1,float)
dset(self,"A",depend_value(value=A.copy(),name='A'))
      self.ns = len(self.A) - 1
# now, this is tricky. if C is taken from temp, then we want it to be updated
# as a depend of temp. Otherwise, we want it to be an independent beast.
if C is None:
C = np.identity(self.ns+1,float)*self.temp
dset(self,"C",
depend_value(name='C', func=self.get_C,
dependencies=[dget(self,"temp")]))
else:
dset(self,"C",depend_value(value=C.copy(),name='C'))
dset(self,"T",
depend_value(name="T",func=self.get_T,
dependencies=[dget(self,"A"), dget(self,"dt")]))
dset(self,"S",
depend_value(name="S",func=self.get_S,
dependencies=[dget(self,"C"), dget(self,"T")]))
self.s = np.zeros(0)
def bind(self, beads=None, atoms=None, pm=None, prng=None, fixdof=None):
"""Binds the appropriate degrees of freedom to the thermostat.
This takes an object with degrees of freedom, and makes their momentum
and mass vectors members of the thermostat. It also then creates the
objects that will hold the data needed in the thermostat algorithms
and the dependency network.
Args:
beads: An optional beads object to take the mass and momentum vectors
from.
atoms: An optional atoms object to take the mass and momentum vectors
from.
pm: An optional tuple containing a single momentum value and its
conjugate mass.
prng: An optional pseudo random number generator object. Defaults to
Random().
fixdof: An optional integer which can specify the number of constraints
applied to the system. Defaults to zero.
Raises:
TypeError: Raised if no appropriate degree of freedom or object
containing a momentum vector is specified for
the thermostat to couple to.
"""
super(ThermoGLE,self).bind(beads,atoms,pm,prng,fixdof)
# allocates, initializes or restarts an array of s's
if self.s.shape != (self.ns + 1, len(dget(self,"m"))):
if len(self.s) > 0:
warning("Mismatch in GLE s array size on restart, will reinitialise to free particle.", verbosity.low)
self.s = np.zeros((self.ns + 1, len(dget(self,"m"))))
# Initializes the s vector in the free-particle limit
info(" GLE additional DOFs initialised to the free-particle limit.", verbosity.low)
SC = stab_cholesky(self.C*Constants.kb)
self.s[:] = np.dot(SC, self.prng.gvec(self.s.shape))
else:
info("GLE additional DOFs initialised from input.", verbosity.medium)
def step(self):
"""Updates the bound momentum vector with a GLE thermostat"""
p = depstrip(self.p).copy()
self.s[0,:] = self.p/self.sm
self.ethermo += np.dot(self.s[0],self.s[0])*0.5
self.s[:] = np.dot(self.T,self.s) + np.dot(self.S,self.prng.gvec(self.s.shape))
self.ethermo -= np.dot(self.s[0],self.s[0])*0.5
self.p = self.s[0]*self.sm
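   # The GLE update is the multivariate analogue of the Langevin step: row 0
   # of s holds the physical (mass-scaled) momenta and the remaining ns rows
   # the auxiliary momenta, propagated jointly as
   #     s' = T.s + S.xi,  T = exp(-dt*A/2),  S.S^T = kb*(C - T.C.T^T),
   # which embeds a non-Markovian coloured-noise bath in a Markovian
   # extended phase space.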
class ThermoNMGLE(Thermostat):
"""Represents a 'normal-modes' GLE thermostat.
An extension to the GLE thermostat which is applied in the
   normal modes representation, and which allows a different GLE to be
   used for each normal mode.
Attributes:
      ns: The number of auxiliary degrees of freedom.
nb: The number of beads.
s: An array holding all the momenta, including the ones for the
         auxiliary degrees of freedom.
Depend objects:
A: Drift matrix giving the damping time scales for all the different
degrees of freedom (must contain nb terms).
C: Static covariance matrix.
Satisfies A.C + C.transpose(A) = B.transpose(B), where B is the
diffusion matrix, giving the strength of the coupling of the system
with the heat bath, and thus the size of the stochastic
contribution of the thermostat.
"""
def get_C(self):
"""Calculates C from temp (if C is not set explicitly)."""
rv = np.ndarray((self.nb, self.ns+1, self.ns+1), float)
for b in range(0,self.nb):
rv[b] = np.identity(self.ns + 1,float)*self.temp
return rv[:]
def __init__(self, temp = 1.0, dt = 1.0, A = None, C = None, ethermo=0.0):
"""Initialises ThermoGLE.
Args:
temp: The simulation temperature. Defaults to 1.0.
dt: The simulation time step. Defaults to 1.0.
A: An optional matrix giving the drift matrix. Defaults to a single
value of 1.0.
C: An optional matrix giving the covariance matrix. Defaults to an
identity matrix times temperature with the same dimensions as the
total number of degrees of freedom in the system.
ethermo: The initial heat energy transferred to the bath.
Defaults to 0.0. Will be non-zero if the thermostat is
initialised from a checkpoint file.
"""
super(ThermoNMGLE,self).__init__(temp,dt,ethermo)
if A is None:
A = np.identity(1,float)
dset(self,"A",depend_value(value=A.copy(),name='A'))
self.nb = len(self.A)
      self.ns = len(self.A[0]) - 1
# now, this is tricky. if C is taken from temp, then we want it to be
# updated as a depend of temp.
# Otherwise, we want it to be an independent beast.
if C is None:
dset(self,"C",depend_value(name='C', func=self.get_C, dependencies=[dget(self,"temp")]))
else:
dset(self,"C",depend_value(value=C.copy(),name='C'))
def bind(self, nm=None, prng=None, fixdof=None):
"""Binds the appropriate degrees of freedom to the thermostat.
This takes an object with degrees of freedom, and makes their momentum
and mass vectors members of the thermostat. It also then creates the
objects that will hold the data needed in the thermostat algorithms
and the dependency network. Actually, this specific thermostat requires
being called on a beads object.
Args:
nm: An optional normal modes object to take the mass and momentum
vectors from.
prng: An optional pseudo random number generator object. Defaults to
Random().
fixdof: An optional integer which can specify the number of constraints
applied to the system. Defaults to zero.
Raises:
TypeError: Raised if no beads object is specified for
the thermostat to couple to.
"""
      if nm is None or type(nm) is not NormalModes:
raise TypeError("ThermoNMGLE.bind expects a NormalModes argument to bind to")
if prng is None:
self.prng = Random()
else:
self.prng = prng
if (nm.nbeads != self.nb):
         raise IndexError("The parameters in nm_gle options correspond to a bead number " + str(self.nb) + ", which does not match the number of beads in the path, " + str(nm.nbeads))
# allocates, initializes or restarts an array of s's
if self.s.shape != (self.nb, self.ns + 1, nm.natoms *3) :
if len(self.s) > 0:
warning("Mismatch in GLE s array size on restart, will reinitialise to free particle.", verbosity.low)
self.s = np.zeros((self.nb, self.ns + 1, nm.natoms*3))
# Initializes the s vector in the free-particle limit
info(" GLE additional DOFs initialised to the free-particle limit.", verbosity.low)
for b in range(self.nb):
SC = stab_cholesky(self.C[b]*Constants.kb)
self.s[b] = np.dot(SC, self.prng.gvec(self.s[b].shape))
else:
info("GLE additional DOFs initialised from input.", verbosity.medium)
prev_ethermo = self.ethermo
# creates a set of thermostats to be applied to individual normal modes
self._thermos = [ThermoGLE(temp=1, dt=1, A=self.A[b], C=self.C[b]) for b in range(self.nb)]
# must pipe all the dependencies in such a way that values for the nm
# thermostats are automatically updated based on the "master" thermostat
def make_Agetter(k):
return lambda: self.A[k]
def make_Cgetter(k):
return lambda: self.C[k]
it = 0
for t in self._thermos:
t.s = self.s[it] # gets the s's as a slice of self.s
t.bind(pm=(nm.pnm[it,:],nm.dynm3[it,:]), prng=self.prng) # bind thermostat t to the it-th normal mode
# pipes temp and dt
deppipe(self,"temp", t, "temp")
deppipe(self,"dt", t, "dt")
# here we pipe the A and C of individual NM to the "master" arrays
dget(t,"A").add_dependency(dget(self,"A"))
dget(t,"A")._func = make_Agetter(it)
dget(t,"C").add_dependency(dget(self,"C"))
dget(t,"C")._func = make_Cgetter(it)
dget(self,"ethermo").add_dependency(dget(t,"ethermo"))
it += 1
# since the ethermo will be "delegated" to the normal modes thermostats,
# one has to split
# any previously-stored value between the sub-thermostats
for t in self._thermos:
t.ethermo = prev_ethermo/self.nb
dget(self,"ethermo")._func = self.get_ethermo;
def step(self):
"""Updates the thermostat in NM representation by looping over the
individual DOFs.
"""
for t in self._thermos:
t.step()
def get_ethermo(self):
"""Computes the total energy transferred to the heat bath for all the nm
thermostats.
"""
et = 0.0
for t in self._thermos:
et += t.ethermo
return et
class ThermoNMGLEG(ThermoNMGLE):
"""Represents a 'normal-modes' GLE thermostat + SVR.
An extension to the above NMGLE thermostat which also adds a stochastic velocity
rescaling to the centroid.
Depend objects:
tau: Thermostat damping time scale. Larger values give a less strongly
coupled thermostat.
"""
def __init__(self, temp = 1.0, dt = 1.0, A = None, C = None, tau=1.0, ethermo=0.0):
super(ThermoNMGLEG,self).__init__(temp, dt, A, C, ethermo)
dset(self,"tau",depend_value(value=tau,name='tau'))
def bind(self, nm=None, prng=None, fixdof=None):
"""Binds the appropriate degrees of freedom to the thermostat.
This takes an object with degrees of freedom, and makes their momentum
and mass vectors members of the thermostat. It also then creates the
objects that will hold the data needed in the thermostat algorithms
and the dependency network. Actually, this specific thermostat requires
being called on a normal modes object.
Args:
nm: An optional normal modes object to take the mass and momentum
vectors from.
prng: An optional pseudo random number generator object. Defaults to
Random().
fixdof: An optional integer which can specify the number of constraints
applied to the system. Defaults to zero.
"""
super(ThermoNMGLEG,self).bind(nm, prng, fixdof)
t = ThermoSVR(self.temp, self.dt, self.tau)
t.bind(pm=(nm.pnm[0,:],nm.dynm3[0,:]), prng=self.prng) # bind global thermostat to centroid
# pipes temp and dt
deppipe(self,"temp", t, "temp")
deppipe(self,"dt", t, "dt")
deppipe(self,"tau", t, "tau")
dget(self,"ethermo").add_dependency(dget(t,"ethermo"))
self._thermos.append(t)
|
quang-ha/lammps
|
tools/i-pi/ipi/engine/thermostats.py
|
Python
|
gpl-2.0
| 34,428
|
[
"Gaussian"
] |
77cecca07b6bc4bb9719b2a55775231a7c98af27da62951200b22cf7f6cffa7f
|
import gzip
import pysam
import timeit
iterations = 5
repeats = 100
print ("repeats=", repeats, "iterations=", iterations)
fn_compressed = '/tmp/windows_small.bed.gz'
fn_uncompressed = '/tmp/windows_small.bed'
def test_python_compressed():
'''iterate through with python.'''
f = gzip.open( fn_compressed)
l = len( [x.encode().split("\t") for x in f])
def test_python_uncompressed():
'''iterate through with python.'''
f = open( "windows_small.bed")
l = len( [x.split("\t") for x in f])
def test_fetch_plain():
"""Stupid test function"""
f = pysam.Tabixfile(fn_compressed)
l = len( list(f.fetch()) )
def test_fetch_parsed():
"""Stupid test function"""
f = pysam.Tabixfile(fn_compressed)
l = len( list(f.fetch( parser = pysam.asBed())) )
def test_iterator_generic_compressed():
f = gzip.open(fn_compressed)
l = len( list( pysam.tabix_generic_iterator( f, parser = pysam.asBed() )))
def test_iterator_generic_uncompressed():
f = open("windows_small.bed")
l = len( list( pysam.tabix_generic_iterator( f, parser = pysam.asBed() )))
def test_iterator_parsed_compressed():
f = gzip.open(fn_compressed)
l = len( list( pysam.tabix_iterator( f, parser = pysam.asBed() )))
def test_iterator_parsed_uncompressed():
f = open("windows_small.bed")
l = len( list( pysam.tabix_iterator( f, parser = pysam.asBed() )))
def test_iterator_file_compressed():
f = gzip.open("windows_small.bed")
l = len( list( pysam.tabix_file_iterator( f, parser = pysam.asBed() )))
def test_iterator_file_uncompressed():
f = open("windows_small.bed")
l = len( list( pysam.tabix_file_iterator( f, parser = pysam.asBed() )))
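# benchmark matrix: plain Python line splitting versus pysam's Tabixfile.fetch
# and the generic/parsed/file iterators, each on compressed and uncompressed input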
tests = ( test_python_compressed,
test_python_uncompressed,
test_fetch_plain,
test_fetch_parsed,
test_iterator_generic_compressed,
test_iterator_generic_uncompressed,
test_iterator_parsed_compressed,
test_iterator_parsed_uncompressed,
test_iterator_file_compressed,
test_iterator_file_uncompressed )
for repeat in range( repeats ):
print ("# repeat=", repeat)
for test in tests:
try:
t = timeit.timeit( test, number = iterations )
except AttributeError:
continue
print ("%5.2f\t%s" % (t,str(test)))
|
daler/pysam
|
benchmark/tabix_bench.py
|
Python
|
mit
| 2,349
|
[
"pysam"
] |
a1ceea711394112ea296af62cded12f7e98d25c28c099c191a0403da0480fdde
|
from lab02.Neuron import Neuron
class NeuralNetwork:
def __init__(self, population, size):
self.size = size
self.network = []
for i in range(size):
self.network.append(Neuron(population))
def train(self, learning_set, epoch, learning_rate):
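# competitive (winner-take-all) learning: for every sample only the most
# strongly responding neuron has its weights pulled towards that sample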
for i in range(epoch):
for el in learning_set:
s = self.neural_feed(el)
id_winner = self.find_winner_neuron(s)
self.learn_winner(id_winner, el, learning_rate)
def neural_feed(self, feed_set):
result = []
for neuron in self.network:
result.append(neuron.feed(feed_set))
return result
def find_winner_neuron(self, feed_res):
index = None
result = -10
for i, neuron in enumerate(self.network):
f = neuron.feed(feed_res)
if f >= result:
result = f
index = i
return index
def learn_winner(self, id, set, rate):
self.network[id].update_weight(set, rate)
def neural_respond(self, set):
s = self.neural_feed(set)
id = self.find_winner_neuron(s)
respond = self.size*[0]
respond[id] = 1
return respond
if __name__ == "__main__":
file = open("iris.data")
data = file.read()
file.close()
data = [row.split(',') for row in data.split("\n")]
training_data = [[float(c) for c in row[:-1]] for row in data if len(row) > 1]
test_set = [[5.1, 3.5, 1.4, 0.2],
[7.0, 3.2, 4.7, 1.4],
[6.3, 3.3, 6.0, 2.5]
]
network = NeuralNetwork(4, 3)
network.train(training_data, 500, 0.1)
print(network.neural_respond(test_set[0]))
print(network.neural_respond(test_set[1]))
print(network.neural_respond(test_set[2]))
|
lucekdudek/si
|
lab03/NauralNetwork.py
|
Python
|
apache-2.0
| 1,831
|
[
"NEURON"
] |
388e1b96413e6d1e1a35617fe830ed1acc4593f49ae6f19a4325229b99240c9f
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
from customeditor import CustomEditor
from camelot.view.art import Icon
default_icon_names = [
'face-angel',
'face-crying',
'face-devilish',
'face-glasses',
'face-grin',
'face-kiss',
'face-monkey',
'face-plain',
'face-sad',
'face-smile',
'face-smile-big',
'face-surprise',
'face-wink',
]
default_icons = list( (icon_name, Icon('tango/16x16/emotes/%s.png'%icon_name)) for icon_name in default_icon_names)
class SmileyEditor(CustomEditor):
def __init__(self,
parent,
editable = True,
icons = default_icons,
field_name = 'icons',
**kwargs):
CustomEditor.__init__(self, parent)
self.setObjectName( field_name )
self.box = QtGui.QComboBox()
self.box.setFrame(True)
self.box.setEditable(False)
self.name_by_position = {0:None}
self.position_by_name = {None:0}
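# combo-box position 0 is reserved for the empty "no smiley" choice;
# the named icons are mapped to positions 1..len(icons)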
self.box.addItem('')
for i,(icon_name, icon) in enumerate(icons):
self.name_by_position[i+1] = icon_name
self.position_by_name[icon_name] = i+1
self.box.addItem(icon.getQIcon(), '')
self.box.setFixedHeight(self.get_height())
self.setFocusPolicy(Qt.StrongFocus)
layout = QtGui.QHBoxLayout(self)
layout.setContentsMargins( 0, 0, 0, 0)
layout.setSpacing(0)
self.setAutoFillBackground(True)
if not editable:
self.box.setEnabled(False)
else:
self.box.setEnabled(True)
self.box.activated.connect( self.smiley_changed )
layout.addWidget(self.box)
layout.addStretch()
self.setLayout(layout)
def get_value(self):
position = self.box.currentIndex()
return CustomEditor.get_value(self) or self.name_by_position[position]
def set_enabled(self, editable=True):
self.box.setEnabled(editable)
@QtCore.pyqtSlot( int )
def smiley_changed(self, _index ):
self.editingFinished.emit()
def set_value(self, value):
name = CustomEditor.set_value(self, value)
self.box.setCurrentIndex( self.position_by_name[name] )
|
jeroendierckx/Camelot
|
camelot/view/controls/editors/smileyeditor.py
|
Python
|
gpl-2.0
| 3,375
|
[
"VisIt"
] |
6f14f1cbfe5bf1d23cf13a08446844a57016b957f1eed6bc749cbe89c487b95c
|
"""
../wps.py?request=execute
&service=wps
&version=1.0.0
&identifier=esmvaltool-perfmetrics
&status=true
&storeExecuteResponse=true
"""
import datetime
import shutil
import netCDF4
import urlparse
from pywps.Process import WPSProcess
import os
import logging
from jinja2 import FileSystemLoader, Environment,select_autoescape
import glob
class Process(WPSProcess):
def __init__(self):
# init process
WPSProcess.__init__(self,
identifier="esmvaltool-clouds", # the same as the file name
version="1.0",
title="Clouds Diagnostics",
storeSupported="True",
statusSupported="True",
abstract="Create Cloud diagnostics using ESMValTool (takes about 2 minutes).",
grassLocation=False)
self.startYear = self.addLiteralInput(identifier="startYear",
title="First year data used in plot",
type="Integer",
default=2003,
minOccurs=1,
maxOccurs=1)
self.endYear = self.addLiteralInput(identifier="endYear",
title="Last year data used in plot",
type="Integer",
default=2005,
minOccurs=1,
maxOccurs=1)
# self.opendapURL = self.addLiteralOutput(identifier="opendapURL",
# title="opendapURL",
# type="String", )
self.plots = []
for i in range (0,8):
self.plots.append(self.addComplexOutput(identifier = "plot%d" % i,
title = "Plot",
formats = [
{"mimeType":"image/png"}
]))
def execute(self):
self.status.set("starting", 0)
#print some debugging info
start_year = self.startYear.getValue()
end_year = self.endYear.getValue()
# This does not work atm.
# This allows the NetCDF library to find the users credentials (X509 cert)
# Set current working directory to user HOME dir
os.chdir(os.environ['HOME'])
# Create output folder name
output_folder_name = "WPS_" + self.identifier + "_" + datetime.datetime.now().strftime("%Y%m%dT%H%M%SZ")
logging.debug(os.environ['POF_OUTPUT_PATH'])
#OpenDAP Url prefix (hosted by portal)
output_folder_url = os.environ['POF_OUTPUT_URL'] + output_folder_name
#Filesystem output path
output_folder_path = os.path.join(os.environ['POF_OUTPUT_PATH'], output_folder_name)
logging.debug("output folder path is %s" % output_folder_path)
#Create output directory
if not os.path.exists(output_folder_path):
os.makedirs(output_folder_path)
#copy input files to scratch (in correct folders for esmvaltool)
#next, copy input netcdf to a location esmvaltool expects
# example cmpi5 esgf link
# http://esgf-data1.ceda.ac.uk/thredds/dodsC/esg_dataroot/cmip5/output1/CSIRO-BOM/ACCESS1-0/historical/mon/atmos/Amon/r1i1p1/v1/tas/tas_Amon_ACCESS1-0_historical_r1i1p1_185001-200512.nc
# esmvaltool data folder example
# ETHZ_CMIP5/historical/Amon/ta/bcc-csm1-1/r1i1p1/ta_Amon_bcc-csm1-1_historical_r1i1p1_200001-200212.nc
#description = <model> SOME DESCRIPTION FIELDS HERE </model>
self.status.set("setting up namelist for esmvaltool", 10)
#create esmvaltool config (using template)
environment = Environment(loader=FileSystemLoader('/namelists'))
#autoescape=select_autoescape(['html', 'xml']))
template = environment.get_template('namelist_clouds.xml')
generated_namelist = template.render(work_dir=output_folder_path)
logging.debug("template output = %s" % generated_namelist)
#write generated namelist to file
namelist_path = output_folder_path + "/" + 'namelist.xml'
namelist_fd = open(namelist_path, 'w')
namelist_fd.write(generated_namelist)
namelist_fd.close()
#run esmvaltool command
self.status.set("running esmvaltool", 20)
os.chdir('/src/ESMValTool')
self.cmd(['python', 'main.py', namelist_path])
#grab output images from the output folder
self.status.set("processing output", 90)
output_images = sorted(glob.glob(output_folder_path + "/clouds*/*.png"))
for i in range(0, len(output_images)):
image = output_images[i]
logging.debug("output image path is %s" % image)
# rel_output_image = os.path.relpath(output_image, output_folder_path)
# plot_url = output_folder_url + "/" + rel_output_image
self.plots[i].setValue(image)
#KNMI WPS Specific Set output
self.status.set("ready", 100);
|
c3s-magic/adaguc-services-esmvaltool-wps
|
processes/esmvaltool-clouds.py
|
Python
|
apache-2.0
| 5,199
|
[
"NetCDF"
] |
36c937b08524eb1e1fb1fb337bf56b5f2c9bb0e97d446a432a83f7c95ddda65e
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
""" Enable using one or more Storage Elements
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
read = True
write = True
check = True
remove = True
site = ''
mute = False
Script.setUsageMessage( """
Enable using one or more Storage Elements
Usage:
%s SE1 [SE2 ...]
""" % Script.scriptName )
Script.registerSwitch( "r" , "AllowRead" , " Allow only reading from the storage element" )
Script.registerSwitch( "w" , "AllowWrite", " Allow only writing to the storage element" )
Script.registerSwitch( "k" , "AllowCheck", " Allow only check access to the storage element" )
Script.registerSwitch( "v" , "AllowRemove", " Allow only remove access to the storage element" )
Script.registerSwitch( "m" , "Mute" , " Do not send email" )
Script.registerSwitch( "S:", "Site=" , " Allow all SEs associated to site" )
Script.parseCommandLine( ignoreErrors = True )
ses = Script.getPositionalArgs()
for switch in Script.getUnprocessedSwitches():
if switch[0].lower() == "r" or switch[0].lower() == "allowread":
write = False
check = False
remove = False
if switch[0].lower() == "w" or switch[0].lower() == "allowwrite":
read = False
check = False
remove = False
if switch[0].lower() == "k" or switch[0].lower() == "allowcheck":
read = False
write = False
remove = False
if switch[0].lower() == "v" or switch[0].lower() == "allowremove":
read = False
write = False
check = False
if switch[0].lower() == "m" or switch[0].lower() == "mute":
mute = True
if switch[0] == "S" or switch[0].lower() == "site":
site = switch[1]
# from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC import gConfig, gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
# csAPI = CSAPI()
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
setup = gConfig.getValue( '/DIRAC/Setup', '' )
if not setup:
print 'ERROR: Could not contact Configuration Service'
exitCode = 2
DIRAC.exit( exitCode )
res = getProxyInfo()
if not res[ 'OK' ]:
gLogger.error( 'Failed to get proxy information', res[ 'Message' ] )
DIRAC.exit( 2 )
userName = res['Value'].get( 'username' )
if not userName:
gLogger.error( 'Failed to get username for proxy' )
DIRAC.exit( 2 )
if site:
res = gConfig.getOptionsDict( '/Resources/Sites/LCG/%s' % site )
if not res[ 'OK' ]:
gLogger.error( 'The provided site (%s) is not known.' % site )
DIRAC.exit( -1 )
ses.extend( res[ 'Value' ][ 'SE' ].replace( ' ', '' ).split( ',' ) )
if not ses:
gLogger.error( 'There were no SEs provided' )
DIRAC.exit()
readAllowed = []
writeAllowed = []
checkAllowed = []
removeAllowed = []
resourceStatus = ResourceStatus()
res = resourceStatus.getStorageElementStatus( ses )
if not res[ 'OK' ]:
gLogger.error( 'Storage Element %s does not exist' % ses )
DIRAC.exit( -1 )
reason = 'Forced with dirac-admin-allow-se by %s' % userName
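# For each requested SE, switch the selected access types (read/write/check/remove)
# back to Active; SEs whose current state is not banned/degraded/probing are skipped.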
for se, seOptions in res[ 'Value' ].items():
resW = resC = resR = { 'OK' : False }
# InActive is used on the CS model, Banned is the equivalent in RSS
if read and seOptions.has_key( 'ReadAccess' ):
if not seOptions[ 'ReadAccess' ] in [ "InActive", "Banned", "Probing", "Degraded" ]:
gLogger.notice( 'Read option for %s is %s, instead of %s' %
( se, seOptions[ 'ReadAccess' ], [ "InActive", "Banned", "Probing", "Degraded" ] ) )
gLogger.notice( 'Try specifying the command switches' )
continue
if 'ARCHIVE' in se:
gLogger.notice( '%s is not supposed to change Read status to Active' % se )
resR[ 'OK' ] = True
else:
resR = resourceStatus.setStorageElementStatus( se, 'ReadAccess', 'Active', reason, userName )
if not resR['OK']:
gLogger.error( "Failed to update %s read access to Active" % se )
else:
gLogger.notice( "Successfully updated %s read access to Active" % se )
readAllowed.append( se )
# InActive is used on the CS model, Banned is the equivalent in RSS
if write and seOptions.has_key( 'WriteAccess' ):
if not seOptions[ 'WriteAccess' ] in [ "InActive", "Banned", "Probing", "Degraded" ]:
gLogger.notice( 'Write option for %s is %s, instead of %s' %
( se, seOptions[ 'WriteAccess' ], [ "InActive", "Banned", "Probing", "Degraded" ] ) )
gLogger.notice( 'Try specifying the command switches' )
continue
resW = resourceStatus.setStorageElementStatus( se, 'WriteAccess', 'Active', reason, userName )
if not resW['OK']:
gLogger.error( "Failed to update %s write access to Active" % se )
else:
gLogger.notice( "Successfully updated %s write access to Active" % se )
writeAllowed.append( se )
# InActive is used on the CS model, Banned is the equivalent in RSS
if check and seOptions.has_key( 'CheckAccess' ):
if not seOptions[ 'CheckAccess' ] in [ "InActive", "Banned", "Probing", "Degraded" ]:
gLogger.notice( 'Check option for %s is %s, instead of %s' %
( se, seOptions[ 'CheckAccess' ], [ "InActive", "Banned", "Probing", "Degraded" ] ) )
gLogger.notice( 'Try specifying the command switches' )
continue
resC = resourceStatus.setStorageElementStatus( se, 'CheckAccess', 'Active', reason, userName )
if not resC['OK']:
gLogger.error( "Failed to update %s check access to Active" % se )
else:
gLogger.notice( "Successfully updated %s check access to Active" % se )
checkAllowed.append( se )
# InActive is used on the CS model, Banned is the equivalent in RSS
if remove and seOptions.has_key( 'RemoveAccess' ):
if not seOptions[ 'RemoveAccess' ] in [ "InActive", "Banned", "Probing", "Degraded" ]:
gLogger.notice( 'Remove option for %s is %s, instead of %s' %
( se, seOptions[ 'RemoveAccess' ], [ "InActive", "Banned", "Probing", "Degraded" ] ) )
gLogger.notice( 'Try specifying the command switches' )
continue
resC = resourceStatus.setStorageElementStatus( se, 'RemoveAccess', 'Active', reason, userName )
if not resC['OK']:
gLogger.error( "Failed to update %s remove access to Active" % se )
else:
gLogger.notice( "Successfully updated %s remove access to Active" % se )
removeAllowed.append( se )
if not( resR['OK'] or resW['OK'] or resC['OK'] ):
DIRAC.exit( -1 )
if not ( writeAllowed or readAllowed or checkAllowed or removeAllowed ):
gLogger.info( "No storage elements were allowed" )
DIRAC.exit( -1 )
if mute:
gLogger.notice( 'Email is muted by script switch' )
DIRAC.exit( 0 )
subject = '%s storage elements allowed for use' % len( writeAllowed + readAllowed + checkAllowed + removeAllowed )
addressPath = 'EMail/Production'
address = Operations().getValue( addressPath, '' )
body = ''
if read:
body = "%s\n\nThe following storage elements were allowed for reading:" % body
for se in readAllowed:
body = "%s\n%s" % ( body, se )
if write:
body = "%s\n\nThe following storage elements were allowed for writing:" % body
for se in writeAllowed:
body = "%s\n%s" % ( body, se )
if check:
body = "%s\n\nThe following storage elements were allowed for checking:" % body
for se in checkAllowed:
body = "%s\n%s" % ( body, se )
if remove:
body = "%s\n\nThe following storage elements were allowed for removing:" % body
for se in removeAllowed:
body = "%s\n%s" % ( body, se )
if not address:
gLogger.notice( "'%s' not defined in Operations, can not send Mail\n" % addressPath, body )
DIRAC.exit( 0 )
res = diracAdmin.sendMail( address, subject, body )
gLogger.notice( 'Notifying %s' % address )
if res[ 'OK' ]:
gLogger.notice( res[ 'Value' ] )
else:
gLogger.notice( res[ 'Message' ] )
DIRAC.exit( 0 )
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
marcelovilaca/DIRAC
|
DataManagementSystem/scripts/dirac-admin-allow-se.py
|
Python
|
gpl-3.0
| 8,412
|
[
"DIRAC"
] |
ef7c42781756c7388c657b86623ad8de9ea95b816177494aee1847e40dc358bd
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to upload images to wikipedia.
Arguments:
-keep Keep the filename as is
-filename Target filename without the namespace prefix
-noverify Do not ask for verification of the upload description if one
is given
-abortonwarn: Abort upload on the specified warning type. If no warning type
is specified, aborts on any warning.
-ignorewarn: Ignores specified upload warnings. If no warning type is
specified, ignores all warnings. Use with caution
-chunked: Upload the file in chunks (more overhead, but restartable). If
no value is specified the chunk size is 1 MiB. The value must
be a number which can be preceded by a suffix. The units are:
No suffix: Bytes
'k': Kilobytes (1000 B)
'M': Megabytes (1000000 B)
'Ki': Kibibytes (1024 B)
'Mi': Mebibytes (1024x1024 B)
The suffixes are case insensitive.
-always Don't ask the user anything. This will imply -keep and
-noverify and require that either -abortonwarn or -ignorewarn
is defined for all. It will also require a valid file name and
description. It'll only overwrite files if -ignorewarn includes
the 'exists' warning.
-recursive When the filename is a directory it also uploads the files from
the subdirectories.
-summary Pick a custom edit summary for the bot.
It is possible to combine -abortonwarn and -ignorewarn so that if the specific
warning is given it won't apply the general one but more specific one. So if it
should ignore specific warnings and abort on the rest it's possible by defining
no warning for -abortonwarn and the specific warnings for -ignorewarn. The
order does not matter. If both are unspecific or a warning is specified by
both, it'll prefer aborting.
If any other arguments are given, the first is either URL, filename or
directory to upload, and the rest is a proposed description to go with the
upload. If none of these are given, the user is asked for the directory, file
or URL to upload. The bot will then upload the image to the wiki.
The script will ask for the location of an image(s), if not given as a
parameter, and for a description.
"""
#
# (C) Rob W.W. Hooft, Andre Engels 2003-2004
# (C) Pywikibot team, 2003-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import math
import os
import re
import pywikibot
from pywikibot.bot import suggest_help
from pywikibot.specialbots import UploadRobot
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
url = u''
description = []
summary = None
keepFilename = False
always = False
useFilename = None
verifyDescription = True
aborts = set()
ignorewarn = set()
chunk_size = 0
chunk_size_regex = r'^-chunked(?::(\d+(?:\.\d+)?)[ \t]*(k|ki|m|mi)?b?)?$'
chunk_size_regex = re.compile(chunk_size_regex, re.I)
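# matches e.g. "-chunked", "-chunked:5Mi" or "-chunked:0.5M"; group 1 is the
# number and group 2 the optional (case-insensitive) unit suffix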
recursive = False
# process all global bot args
# returns a list of non-global args, i.e. args for upload.py
for arg in pywikibot.handle_args(args):
if arg:
if arg == '-always':
keepFilename = True
always = True
verifyDescription = False
elif arg == '-recursive':
recursive = True
elif arg.startswith('-keep'):
keepFilename = True
elif arg.startswith('-filename:'):
useFilename = arg[10:]
elif arg.startswith('-summary'):
summary = arg[9:]
elif arg.startswith('-noverify'):
verifyDescription = False
elif arg.startswith('-abortonwarn'):
if len(arg) > len('-abortonwarn:') and aborts is not True:
aborts.add(arg[len('-abortonwarn:'):])
else:
aborts = True
elif arg.startswith('-ignorewarn'):
if len(arg) > len('-ignorewarn:') and ignorewarn is not True:
ignorewarn.add(arg[len('-ignorewarn:'):])
else:
ignorewarn = True
elif arg.startswith('-chunked'):
match = chunk_size_regex.match(arg)
if match:
if match.group(1): # number was in there
base = float(match.group(1))
if match.group(2): # suffix too
suffix = match.group(2).lower()
if suffix == "k":
suffix = 1000
elif suffix == "m":
suffix = 1000000
elif suffix == "ki":
suffix = 1 << 10
elif suffix == "mi":
suffix = 1 << 20
else:
pass # huh?
else:
suffix = 1
chunk_size = math.trunc(base * suffix)
else:
chunk_size = 1 << 20 # default to 1 MiB
else:
pywikibot.error('Chunk size parameter is not valid.')
elif url == u'':
url = arg
else:
description.append(arg)
description = u' '.join(description)
while not ("://" in url or os.path.exists(url)):
if not url:
error = 'No input filename given.'
else:
error = 'Invalid input filename given.'
if not always:
error += ' Try again.'
if always:
url = None
break
else:
pywikibot.output(error)
url = pywikibot.input(u'URL, file or directory where files are now:')
if always and ((aborts is not True and ignorewarn is not True) or
not description or url is None):
additional = ''
missing = []
if url is None:
missing += ['filename']
additional = error + ' '
if description is None:
missing += ['description']
if aborts is not True and ignorewarn is not True:
additional += ('Either -ignorewarn or -abortonwarn must be '
'defined for all codes. ')
additional += 'Unable to run in -always mode'
suggest_help(missing_parameters=missing, additional_text=additional)
return False
if os.path.isdir(url):
file_list = []
for directory_info in os.walk(url):
if not recursive:
# Do not visit any subdirectories
directory_info[1][:] = []
for dir_file in directory_info[2]:
file_list.append(os.path.join(directory_info[0], dir_file))
url = file_list
else:
url = [url]
bot = UploadRobot(url, description=description, useFilename=useFilename,
keepFilename=keepFilename,
verifyDescription=verifyDescription,
aborts=aborts, ignoreWarning=ignorewarn,
chunk_size=chunk_size, always=always,
summary=summary)
bot.run()
if __name__ == "__main__":
main()
|
npdoty/pywikibot
|
scripts/upload.py
|
Python
|
mit
| 7,699
|
[
"VisIt"
] |
38ccf719c5073b79434999c433fe7119d4f4fb95bdd00460c4dfd6c36276f311
|
#!/usr/bin/env python3
# Copyright (C) 2013-2019 The ESPResSo project
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This module parses the feature definition file features.def
import fileinput
import re
class SyntaxError(Exception):
def __init__(self, message, instead):
self.message = message
self.filename = fileinput.filename()
self.lineno = fileinput.filelineno()
self.instead = instead
def __str__(self):
return '%s: %2d: %s in the following line:\n%s' % \
(self.filename, self.lineno, self.message, self.instead)
def toCPPExpr(expr):
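# translates a Python-style feature expression such as "A and not B" into the
# equivalent preprocessor test "defined(A) && !defined(B)"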
expr = expr.replace('and', ' && ')
expr = expr.replace('or', ' || ')
expr = expr.replace('not', ' !')
expr = re.sub('([A-Z0-9_]+)', 'defined(\\1)', expr)
return expr
class defs:
def __init__(self, filename):
# complete set of all defined features
allfeatures = set()
# list of implications (pairs of feature -> implied feature)
implications = list()
# list of requirements (pairs of feature -> requirement expr)
requirements = list()
# set of derived features
derived = set()
# list of derivations (pairs of feature -> derivation expr)
derivations = list()
# list of external features
externals = set()
for line in fileinput.input(filename):
line = line.strip()
# Ignore empty and comment lines
if not line or line.startswith(('#', '//', '/*')):
continue
# Tokenify line
tokens = line.split(None, 2)
# Register the feature
feature = tokens.pop(0)
allfeatures.add(feature)
# get the keyword
if tokens:
keyword = tokens.pop(0)
if not tokens:
rest = None
else:
rest = tokens[0]
# derived
if keyword == 'equals':
if rest is None:
raise SyntaxError("<feature> equals <expr>", line)
if feature in derived:
raise SyntaxError(
"Derived feature is already defined above:", line)
if feature in externals:
raise SyntaxError(
"Derived feature is already defined as external above:", line)
derived.add(feature)
derivations.append((feature, rest, toCPPExpr(rest)))
# externals
elif keyword == 'external':
if rest is not None:
raise SyntaxError("<feature> external", line)
if feature in derived:
raise SyntaxError(
"External feature is already defined as derived above:", line)
implied = set(map((lambda x_y: x_y[1]), implications))
if feature in implied:
raise SyntaxError(
"External feature is implied above:", line)
externals.add(feature)
# implications
elif keyword == 'implies':
if rest is None:
raise SyntaxError(
"<feature> implies [<feature>...]", line)
tokens = rest.split()
for implied in tokens:
if implied.endswith(','):
implied = implied[:-1]
if implied in externals:
raise SyntaxError(
"Implied feature %s is already defined as external above:" % feature, line)
implications.append((feature, implied))
# requires
elif keyword == 'requires':
if rest is None:
raise SyntaxError("<feature> requires <expr>", line)
requirements.append((feature, rest, toCPPExpr(rest)))
# allfeatures minus externals and derived
features = allfeatures.difference(derived)
features = features.difference(externals)
self.allfeatures = allfeatures
self.features = features
self.requirements = requirements
self.implications = implications
self.derived = derived
self.derivations = derivations
self.externals = externals
def check_validity(self, activated):
"""Check whether a set of features is valid.
Returns None if it is not and the set of features including implied features if it is.
"""
newset = activated.copy()
# handle implications
for feature, implied in self.implications:
if feature in newset and implied not in newset:
newset.add(implied)
# handle requirements
featurevars = dict()
derived = list(map((lambda x_y_z: x_y_z[0]), self.derivations))
allfeatures = self.features.union(derived, self.externals)
for feature in allfeatures:
featurevars[feature] = feature in newset
for feature, expr, _ in self.requirements:
if feature in newset:
if not eval(expr, featurevars):
return None
return newset
|
espressomd/espresso
|
src/config/featuredefs.py
|
Python
|
gpl-3.0
| 6,111
|
[
"ESPResSo"
] |
018c7ee8168b6539baaab3e058cd626e3381ff6cab92d917a6bdf6e71b23755d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import os
import re
from monty.io import zopen
from monty.dev import requires
from monty.tempfile import ScratchDir
from pymatgen.core.structure import Structure, Molecule
from pymatgen.core.lattice import Lattice
from pymatgen.io.cssr import Cssr
from pymatgen.io.xyz import XYZ
try:
from zeo.netstorage import AtomNetwork, VoronoiNetwork
from zeo.area_volume import volume, surface_area
from zeo.cluster import get_nearest_largest_diameter_highaccuracy_vornode, \
generate_simplified_highaccuracy_voronoi_network, \
prune_voronoi_network_close_node
zeo_found = True
except ImportError:
zeo_found = False
"""
Module implementing classes and functions to use Zeo++.
Zeo++ Installation Steps:
========================
1) Zeo++ requires Voro++. Download Voro++ from code.lbl.gov using
subversion:
"svn checkout --username anonsvn https://code.lbl.gov/svn/voro/trunk
Password is anonsvn.
2) Stable version of Zeo++ can be obtained from
http://www.maciejharanczyk.info/Zeopp/
Alternatively it can be obtained from code.lbl.gov. Replace voro
with zeo.
3) (Optional) Install cython from pip
Mac OS X:
4) (a) Edit the Voro++/voro/trunk/config.mk file to suit your environment
(compiler, linker).
(b) Run make command
5) (a) Edit the Zeo++/trunk/cython_wrapper/setup.py to correctly point to
Voro++ directory.
(b) Run "python setup.py develop" to install Zeo++ python bindings.
Be patient, it will take a while.
Linux:
4) (a) Edit the Voro++/voro/trunk/config.mk file to suit your environment.
(b) Also add -fPIC option to CFLAGS variable in config.mk file.
(c) Run make command
5) (a) Go to Zeo++/zeo/trunk folder and compile zeo++ library using the
command "make dylib".
(b) Edit the Zeo++/trunk/cython_wrapper/setup_alt.py to correctly
point to Voro++ directory.
(c) Run "python setup_alt.py develop" to install Zeo++ python bindings.
Zeo++ Post-Installation Checking:
==============================
1) Go to pymatgen/io/tests and run "python test_zeoio.py"
If Zeo++ python bindings are properly installed, the tests should
pass. One or two tests will be skipped.
2) Go to pymatgen/analysis/defects/tests and run
"python test_point_defects.py". Lots of tests will be skipped if GULP
is not installed. But there should be no errors.
"""
__author__ = "Bharat Medasani"
__copyright = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Bharat Medasani"
__email__ = "mbkumar@gmail.com"
__data__ = "Aug 2, 2013"
class ZeoCssr(Cssr):
"""
ZeoCssr adds extra fields to CSSR sites to conform with Zeo++
input CSSR format. The coordinate system is rotated from xyz to zyx.
This change aligns the pivot axis of pymatgen (z-axis) with the pivot axis
of Zeo++ (x-axis) for structural modifications.
Args:
structure: A structure to create ZeoCssr object
"""
def __init__(self, structure):
super(ZeoCssr, self).__init__(structure)
def __str__(self):
"""
CSSR.__str__ method is modified to pad 0's to the CSSR site data.
The padding is to conform with the CSSR format supported by Zeo++.
The oxidation state is stripped from site.specie
Also coordinate system is rotated from xyz to zxy
"""
output = [
"{:.4f} {:.4f} {:.4f}"
# .format(*self.structure.lattice.abc),
.format(self.structure.lattice.c,
self.structure.lattice.a,
self.structure.lattice.b),
"{:.2f} {:.2f} {:.2f} SPGR = 1 P 1 OPT = 1"
# .format(*self.structure.lattice.angles),
.format(self.structure.lattice.gamma,
self.structure.lattice.alpha,
self.structure.lattice.beta),
"{} 0".format(len(self.structure)),
"0 {}".format(self.structure.formula)
]
for i, site in enumerate(self.structure.sites):
# if not hasattr(site, 'charge'):
# charge = 0
# else:
# charge = site.charge
charge = site.charge if hasattr(site, 'charge') else 0
# specie = site.specie.symbol
specie = site.species_string
output.append(
"{} {} {:.4f} {:.4f} {:.4f} 0 0 0 0 0 0 0 0 {:.4f}"
.format(
i + 1, specie, site.c, site.a, site.b, charge
# i+1, site.specie, site.a, site.b, site.c, site.charge
)
)
return "\n".join(output)
@staticmethod
def from_string(string):
"""
Reads a string representation to a ZeoCssr object.
Args:
string: A string representation of a ZeoCSSR.
Returns:
ZeoCssr object.
"""
lines = string.split("\n")
toks = lines[0].split()
lengths = [float(i) for i in toks]
toks = lines[1].split()
angles = [float(i) for i in toks[0:3]]
# Zeo++ takes x-axis along a and pymatgen takes z-axis along c
a = lengths.pop(-1)
lengths.insert(0, a)
alpha = angles.pop(-1)
angles.insert(0, alpha)
latt = Lattice.from_lengths_and_angles(lengths, angles)
sp = []
coords = []
chrg = []
for l in lines[4:]:
m = re.match(r'\d+\s+(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+' +
r'([0-9\-\.]+)\s+(?:0\s+){8}([0-9\-\.]+)', l.strip())
if m:
sp.append(m.group(1))
# coords.append([float(m.group(i)) for i in xrange(2, 5)])
# Zeo++ takes x-axis along a and pymatgen takes z-axis along c
coords.append([float(m.group(i)) for i in [3, 4, 2]])
chrg.append(m.group(5))
return ZeoCssr(
Structure(latt, sp, coords, site_properties={'charge': chrg})
)
@staticmethod
def from_file(filename):
"""
Reads a CSSR file to a ZeoCssr object.
Args:
filename: Filename to read from.
Returns:
ZeoCssr object.
"""
with zopen(filename, "r") as f:
return ZeoCssr.from_string(f.read())
class ZeoVoronoiXYZ(XYZ):
"""
Class to read Voronoi Nodes from XYZ file written by Zeo++.
The sites have an additional column representing the voronoi node radius.
The voronoi node radius is represented by the site property voronoi_radius.
Args:
mol: Input molecule holding the voronoi node information
"""
def __init__(self, mol):
super(ZeoVoronoiXYZ, self).__init__(mol)
@staticmethod
def from_string(contents):
"""
Creates Zeo++ Voronoi XYZ object from a string.
from_string method of XYZ class is being redefined.
Args:
contents: String representing Zeo++ Voronoi XYZ file.
Returns:
ZeoVoronoiXYZ object
"""
lines = contents.split("\n")
num_sites = int(lines[0])
coords = []
sp = []
prop = []
coord_patt = re.compile(
r"(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+" +
r"([0-9\-\.]+)"
)
for i in range(2, 2 + num_sites):
m = coord_patt.search(lines[i])
if m:
sp.append(m.group(1)) # this is 1-indexed
# coords.append(map(float, m.groups()[1:4])) # this is 0-indexed
coords.append([float(j)
for j in [m.group(i) for i in [3, 4, 2]]])
prop.append(float(m.group(5)))
return ZeoVoronoiXYZ(
Molecule(sp, coords, site_properties={'voronoi_radius': prop})
)
@staticmethod
def from_file(filename):
"""
Creates XYZ object from a file.
Args:
filename: XYZ filename
Returns:
XYZ object
"""
with zopen(filename) as f:
return ZeoVoronoiXYZ.from_string(f.read())
def __str__(self):
output = [str(len(self._mol)), self._mol.composition.formula]
fmtstr = "{{}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}}".format(
self.precision
)
for site in self._mol:
output.append(fmtstr.format(
site.specie.symbol, site.z, site.x, site.y,
# site.specie, site.x, site.y, site.z,
site.properties['voronoi_radius']
))
return "\n".join(output)
@requires(zeo_found,
"get_voronoi_nodes requires Zeo++ cython extension to be "
"installed. Please contact developers of Zeo++ to obtain it.")
def get_voronoi_nodes(structure, rad_dict=None, probe_rad=0.1):
"""
Analyze the void space in the input structure using voronoi decomposition
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms. Default is
0.1 A
Returns:
voronoi nodes as pymatgen.core.structure.Structure within the
unit cell defined by the lattice of input structure
voronoi face centers as pymatgen.core.structure.Structure within the
unit cell defined by the lattice of input structure
"""
with ScratchDir('.'):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
rad_flag = False
if rad_dict:
rad_file = name + ".rad"
rad_flag = True
with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
fp.write("{} {}\n".format(el, rad_dict[el].real))
atmnet = AtomNetwork.read_from_CSSR(
zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
vornet, vor_edge_centers, vor_face_centers = \
atmnet.perform_voronoi_decomposition()
vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
voro_out_filename = name + '_voro.xyz'
voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule
species = ["X"] * len(voro_node_mol.sites)
coords = []
prop = []
for site in voro_node_mol.sites:
coords.append(list(site.coords))
prop.append(site.properties['voronoi_radius'])
lattice = Lattice.from_lengths_and_angles(
structure.lattice.abc, structure.lattice.angles)
vor_node_struct = Structure(
lattice, species, coords, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
# PMG-Zeo c<->a transformation for voronoi face centers
rot_face_centers = [(center[1], center[2], center[0]) for center in
vor_face_centers]
rot_edge_centers = [(center[1], center[2], center[0]) for center in
vor_edge_centers]
species = ["X"] * len(rot_face_centers)
prop = [0.0] * len(rot_face_centers) # Vor radius not evaluated for fc
vor_facecenter_struct = Structure(
lattice, species, rot_face_centers, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
species = ["X"] * len(rot_edge_centers)
prop = [0.0] * len(rot_edge_centers) # Vor radius not evaluated for fc
vor_edgecenter_struct = Structure(
lattice, species, rot_edge_centers, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
return vor_node_struct, vor_edgecenter_struct, vor_facecenter_struct
def get_high_accuracy_voronoi_nodes(structure, rad_dict, probe_rad=0.1):
"""
Analyze the void space in the input structure using high accuracy
voronoi decomposition.
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms.
Default is 0.1 A
Returns:
voronoi nodes as pymatgen.core.structure.Structure within the
unit cell defined by the lattice of input structure
"""
with ScratchDir('.'):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_flag = True
rad_file = name + ".rad"
with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
print("{} {}".format(el, rad_dict[el].real), file=fp)
atmnet = AtomNetwork.read_from_CSSR(
zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
# vornet, vor_edge_centers, vor_face_centers = \
# atmnet.perform_voronoi_decomposition()
red_ha_vornet = \
prune_voronoi_network_close_node(atmnet)
# generate_simplified_highaccuracy_voronoi_network(atmnet)
# get_nearest_largest_diameter_highaccuracy_vornode(atmnet)
red_ha_vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
voro_out_filename = name + '_voro.xyz'
voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule
species = ["X"] * len(voro_node_mol.sites)
coords = []
prop = []
for site in voro_node_mol.sites:
coords.append(list(site.coords))
prop.append(site.properties['voronoi_radius'])
lattice = Lattice.from_lengths_and_angles(
structure.lattice.abc, structure.lattice.angles)
vor_node_struct = Structure(
lattice, species, coords, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
return vor_node_struct
@requires(zeo_found,
"get_voronoi_nodes requires Zeo++ cython extension to be "
"installed. Please contact developers of Zeo++ to obtain it.")
def get_free_sphere_params(structure, rad_dict=None, probe_rad=0.1):
"""
Analyze the void space in the input structure using voronoi decomposition
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms. Default is
0.1 A
Returns:
Dictionary with the Zeo++ free-sphere parameters: the largest included
sphere diameter, the largest free sphere diameter and the largest
included sphere diameter along the free sphere path.
"""
with ScratchDir('.'):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
rad_flag = False
if rad_dict:
rad_file = name + ".rad"
rad_flag = True
with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
fp.write("{} {}\n".format(el, rad_dict[el].real))
atmnet = AtomNetwork.read_from_CSSR(
zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
out_file = "temp.res"
atmnet.calculate_free_sphere_parameters(out_file)
if os.path.isfile(out_file) and os.path.getsize(out_file) > 0:
with open(out_file, "rt") as fp:
output = fp.readline()
else:
output = ""
fields = [val.strip() for val in output.split()][1:4]
if len(fields) == 3:
fields = [float(field) for field in fields]
free_sphere_params = {'inc_sph_max_dia': fields[0],
'free_sph_max_dia': fields[1],
'inc_sph_along_free_sph_path_max_dia': fields[2]}
return free_sphere_params
# Deprecated. Not needed anymore
def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3,
probe_rad=0.1):
"""
Computes the volume and surface area of isolated void using Zeo++.
Useful to compute the volume and surface area of vacant site.
Args:
structure: pymatgen Structure containing vacancy
rad_dict(optional): Dictionary with short name of elements and their
radii.
chan_rad(optional): Minimum channel Radius.
probe_rad(optional): Probe radius for Monte Carlo sampling.
Returns:
volume: floating number representing the volume of void
"""
with ScratchDir('.'):
name = "temp_zeo"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
if rad_dict:
rad_file = name + ".rad"
with open(rad_file, 'w') as fp:
for el in rad_dict.keys():
fp.write("{0} {1}".format(el, rad_dict[el]))
atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file)
vol_str = volume(atmnet, 0.3, probe_rad, 10000)
sa_str = surface_area(atmnet, 0.3, probe_rad, 10000)
vol = None
sa = None
for line in vol_str.split("\n"):
if "Number_of_pockets" in line:
fields = line.split()
if float(fields[1]) > 1:
vol = -1.0
break
if float(fields[1]) == 0:
vol = -1.0
break
vol = float(fields[3])
for line in sa_str.split("\n"):
if "Number_of_pockets" in line:
fields = line.split()
if float(fields[1]) > 1:
# raise ValueError("Too many voids")
sa = -1.0
break
if float(fields[1]) == 0:
sa = -1.0
break
sa = float(fields[3])
if not vol or not sa:
raise ValueError("Error in zeo++ output stream")
return vol, sa
|
Bismarrck/pymatgen
|
pymatgen/io/zeopp.py
|
Python
|
mit
| 23,655
|
[
"GULP",
"pymatgen"
] |
595552d7fc1b652f7b2c67ed2638174ba8857a9ad153265ec8b9a3287dd98c14
|
import copy
import logging
import mcerp
import numpy as np
import scipy.stats as ss
from mcerp import *
from utils.kde import Transformations
from utils.boxcox import BoxCox
class Distribution(object):
""" Distributional representation of uncertain variables.
"""
NON_ZERO_FACTOR = 1e-6
B_CACHE = {}
DUMMY = N(0, 1)
@staticmethod
def GetDummy():
return copy.copy(Distribution.DUMMY)
@staticmethod
def ConstantDistribution(val):
# type-safe but slower
# X = Distribution.GetDummy()
# X._mcpts = np.asarray([val] * mcerp.npts)
return val
@staticmethod
def DistributionFromBoxCoxGaussian(
target_mean, target_std, samples, lower=None, upper=None):
# Comment following codes to enable additional transformation.
if lower is not None:
lower = None
if upper is not None:
upper = None
k = 50
seeds = np.random.choice(len(samples), replace=False, size=k)
train_set = np.asarray([samples[s] for s in seeds])
samples = train_set
logging.debug('Distribution -- Boxcox sample size: {}'.format(len(samples)))
target_var = target_std * target_std
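# the target mean/variance are mapped into the transformed domain using a
# delta-method (Taylor expansion) approximation of log, reflected log or logit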
if lower is not None and upper is None:
samples = np.log(np.asarray(samples) - lower)
reverse_func = lambda x: np.exp(x) + lower
desired_mean = (np.log(target_mean - lower) -
(target_var/2) * (1/np.power(target_mean - lower, 2)))
desired_var = target_var * np.power(1/(target_mean - lower), 2)
elif lower is None and upper is not None:
samples = np.log(-1. * np.asarray(samples) - (-1. * upper))
reverse_func = lambda x: -1. * (np.exp(x) + (-1. * upper))
desired_mean = (np.log(-1.*target_mean + upper) -
(target_var/2) * (1/np.power(target_mean - upper, 2)))
desired_var = target_var * np.power(1/(target_mean - upper), 2)
elif lower is not None and upper is not None:
assert lower < upper, 'Distribution -- Input variable bound error, lower >= upper'
samples = (np.asarray(samples) - lower) / (upper - lower)
assert np.amin(samples) > 0 and np.amax(samples) < 1
samples = Transformations.logit(samples)
reverse_func = lambda x: Transformations.sigmoid(x * (upper-lower) + lower)
desired_mean = (Transformations.logit(target_mean) +
(target_var/2) *
((2*target_mean-1)/(np.power(target_mean*(target_mean-1), 2))))
desired_var = target_var * np.power(-1. / ((target_mean - 1) * target_mean), 2)
else:
reverse_func = lambda x: x
desired_mean = target_mean
desired_var = target_var
desired_std = np.sqrt(desired_var)
# Shift data set to positive if needed.
shift = np.amin(samples) - Distribution.NON_ZERO_FACTOR
if shift < 0:
samples = samples - shift
desired_mean = desired_mean - shift
bt_func = lambda x: reverse_func(x + shift)
else:
bt_func = reverse_func
# If the following test is not passed, use KDE instead.
#BoxCox.test(samples, la=-40, lb=100)
a = BoxCox.find_lambda(samples)
if a == .0:
bc_var = desired_var * np.power(1/desired_mean, 2)
bc_mean = np.log(desired_mean) - desired_var/2 * np.power(1/desired_mean, 2)
bc_std = np.sqrt(bc_var)
elif a == 1.:
bc_var = desired_var
bc_mean = desired_mean - 1 + desired_var/2
bc_std = np.sqrt(bc_var)
else:
bc_var = desired_var * np.power(np.power(desired_mean, a-1), 2)
bc_mean = ((np.power(desired_mean, a) - 1) / a +
(desired_var / 2) * ((a-1) * np.power(desired_mean, a-2)))
bc_std = np.sqrt(bc_var)
# Compute bounds on box-cox transformed domain.
bc_lower, bc_upper = None, None
if a > 0:
bc_lower = -1. / a
bc_upper = 2 * bc_mean - bc_lower
if a < 0:
bc_upper = -1. / a
bc_lower = 2 * bc_mean - bc_upper
Y = Distribution.GetDummy()
max_trials = 20
while (max_trials):
# X: Gaussian in BoxCox transformed domain.
X = Distribution.GaussianDistribution(bc_mean, bc_std, bc_lower, bc_upper)
# Y: distribution in original domain.
Y._mcpts = BoxCox.back_transform(X._mcpts, a)
Y._mcpts = bt_func(Y._mcpts)
if (Y._mcpts >= 0).all():
#logging.debug('CustomDist -- Generated dist: ({}, {})'.format(Y.mean, np.sqrt(Y.var)))
return Y
max_trials += -1
raise ValueError('Distribution -- Cannot generate proper BoxCox-Transformed distribution.')
@staticmethod
def DistributionFromSamplingFunction(sample_func, trans_func=None):
x = Distribution.GetDummy()
gen_vals = sample_func(mcerp.npts)
gen_vals = gen_vals.reshape(gen_vals.shape[-1])
if not trans_func:
# Must convert to np array explicitly, or mcerp will complain.
x._mcpts = np.asarray(gen_vals)
else:
assert(callable(trans_func))
# Must convert to np array explicitly, or mcerp will complain.
x._mcpts = np.asarray([trans_func(v) for v in gen_vals])
return x
@staticmethod
def NormalizedBinomialDistribution(mean, std):
if std == .0:
logging.warn('Distribution -- Trying to generate normalized Binomial with zero std.')
return Distribution.ConstantDistribution(0)
assert std > .0 and isinstance(std, float)
n = int(mean * (1 - mean) / (std ** 2))
assert n > 0
X = Binomial(n, mean) / n
adjust_x = []
for x in X._mcpts:
assert x >= 0 and x <= 1
if x == 0:
y = x + Distribution.NON_ZERO_FACTOR
elif x == 1:
y = x - Distribution.NON_ZERO_FACTOR
else:
y = x
adjust_x.append(y)
X._mcpts = np.asarray(adjust_x)
return X
@staticmethod
def HigherOrderBernoulli(p0, N):
""" Higher order beroullis are essentially Binomials.
"""
if (p0, N) not in Distribution.B_CACHE:
Distribution.B_CACHE[(p0, N)] = (Binomial(N, p0) if N > 0
else Distribution.ConstantDistribution(0))
return Distribution.B_CACHE[(p0, N)]
@staticmethod
def BinomialDistribution(mean, std, shift=0):
if std == .0:
logging.warn('Distribution -- Trying to generate Binomial with zero std.')
return Distribution.ConstantDistribution(mean)
assert std > .0 and isinstance(std, float)
mean = mean - shift
var = std * std
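# moment matching: for X ~ Binomial(n, p), mean = n*p and var = n*p*(1-p),
# hence p = 1 - var/mean and n = mean**2 / (mean - var)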
p = 1 - var/mean
n = int(round(mean * mean / (mean - var)))
return Binomial(n, p)
@staticmethod
def LogNormalDistribution(mean, std):
if std == .0:
return Distribution.ConstantDistribution(mean)
var = std * std
mean2 = mean * mean
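# Moment matching for the underlying normal. This appears to use the small
# coefficient-of-variation approximation sigma^2 ~= var/mean^2; the exact lognormal
# relation would be sigma^2 = ln(1 + var/mean^2) with mu = ln(mean) - sigma^2/2.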
mu = np.log(mean) - (var/2)*(1/mean2)
sigma = np.sqrt(var/mean2)
dist = UncertainVariable(ss.lognorm(sigma, scale=np.exp(mu)))
logging.debug('Distribution -- LogNormal: ({}, {})'.format(
dist.mean, np.sqrt(dist.var)))
return dist
@staticmethod
def GaussianDistribution(mean, std, a=None, b=None):
if std == .0:
return Distribution.ConstantDistribution(mean)
if a is None and b is None:
# Unbounded Gaussian.
return N(mean, std)
else:
dist = UncertainVariable(ss.truncnorm(
a = -np.inf if a is None else (a - mean) / std,
b = np.inf if b is None else (b - mean) / std,
loc = mean, scale = std))
logging.debug('Distribution -- truncated gaussian: {}, {} [{}, {}]'.format(
dist.mean, np.sqrt(dist.var), a, b))
return dist
@staticmethod
def QLogNormalDistribution(p0, mean, std):
""" Q-distribution with a log-normal base.
A Q-distribution is defined here as the product of a Bernoulli and a base distribution.
"""
if std == .0:
return Distribution.ConstantDistribution(mean)
assert(p0 >=0 and p0 <= 1)
# Performance needs to be non zero, because num_core might be zero.
return (Bern(p0) * Distribution.LogNormalDistribution(mean, std) +
Distribution.NON_ZERO_FACTOR)
@staticmethod
def QGaussianDistribution(p0, mean, std, a = None, b = None):
if std == .0:
return Distribution.ConstantDistribution(mean)
assert(p0 >= 0 and p0 <= 1)
return (Bern(p0) * Distribution.GaussianDistribution(mean, std, a, b) +
Distribution.NON_ZERO_FACTOR)
|
UCSBarchlab/Archrisk
|
models/distributions.py
|
Python
|
bsd-3-clause
| 9,102
|
[
"Gaussian"
] |
247c0bcbb2c768b7eb625e5856a0462f56a76679e548c6e3447f24dc737beff1
|
from PySide.QtGui import QColor
__author__ = 'MaitreyaBuddha'
# Light theme
BACKGROUND_LIGHT = QColor("#FAFAFA")
CARD_LIGHT = QColor("#FFFFFF")
# Dark theme
BACKGROUND_DARK = QColor("#303030")
CARD_DARK = QColor("#424242")
# Teal-Amber color palette
DARK_PRIMARY_COLOR = QColor("#00796B")
PRIMARY_COLOR = QColor("#009688")
LIGHT_PRIMARY_COLOR = QColor("#B2DFDB")
TEXT_ICONS = QColor("#FFFFFF")
ACCENT_COLOR = QColor("#FFC107")
PRIMARY_TEXT = QColor("#212121")
SECONDARY_TEXT = QColor("#727272")
DIVIDER_COLOR = QColor("#B6B6B6")
|
samvidmistry/PyMaterial
|
MUtilities/MColors.py
|
Python
|
mit
| 533
|
[
"Amber"
] |
6cbcd3f2b232425b0b3b69136c0a5ba0002942cd1867956e7103ad7423a16c43
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_asm_policy
short_description: Manage BIG-IP ASM policies
description:
- Manage BIG-IP ASM policies.
version_added: 2.5
deprecated:
removed_in: '2.12'
alternative: bigip_asm_policy_manage
why: >
The bigip_asm_policy module has been split into three new modules to handle import, export and general policy
management. This will allow scalability of the asm policy management as well as ease of maintenance.
Additionally to further reduce the burden of having multiple smaller module F5 has created asm_policy
role in Ansible Galaxy for a more declarative way of ASM policy management.
options:
active:
description:
- If C(yes) will apply and activate existing inactive policy. If C(no), it will
deactivate existing active policy. Generally should be C(yes) only in cases where
you want to activate new or existing policy.
default: no
type: bool
name:
description:
- The ASM policy to manage or create.
required: True
state:
description:
- When C(state) is C(present), and C(file) or C(template) parameter is provided,
new ASM policy is imported and created with the given C(name).
- When C(state) is present and no C(file) or C(template) parameter is provided
new blank ASM policy is created with the given C(name).
- When C(state) is C(absent), ensures that the policy is removed, even if it is
currently active.
choices:
- present
- absent
default: present
file:
description:
- Full path to a policy file to be imported into the BIG-IP ASM.
- Policy files exported from newer versions of BIG-IP cannot be imported into older
versions of BIG-IP. The opposite, however, is true; you can import older into
newer.
template:
description:
- An ASM policy built-in template. If the template does not exist we will raise an error.
- Once the policy has been created, this value cannot change.
- The C(Comprehensive), C(Drupal), C(Fundamental), C(Joomla),
C(Vulnerability Assessment Baseline), and C(Wordpress) templates are only available
on BIG-IP versions >= 13.
choices:
- ActiveSync v1.0 v2.0 (http)
- ActiveSync v1.0 v2.0 (https)
- Comprehensive
- Drupal
- Fundamental
- Joomla
- LotusDomino 6.5 (http)
- LotusDomino 6.5 (https)
- OWA Exchange 2003 (http)
- OWA Exchange 2003 (https)
- OWA Exchange 2003 with ActiveSync (http)
- OWA Exchange 2003 with ActiveSync (https)
- OWA Exchange 2007 (http)
- OWA Exchange 2007 (https)
- OWA Exchange 2007 with ActiveSync (http)
- OWA Exchange 2007 with ActiveSync (https)
- OWA Exchange 2010 (http)
- OWA Exchange 2010 (https)
- Oracle 10g Portal (http)
- Oracle 10g Portal (https)
- Oracle Applications 11i (http)
- Oracle Applications 11i (https)
- PeopleSoft Portal 9 (http)
- PeopleSoft Portal 9 (https)
- Rapid Deployment Policy
- SAP NetWeaver 7 (http)
- SAP NetWeaver 7 (https)
- SharePoint 2003 (http)
- SharePoint 2003 (https)
- SharePoint 2007 (http)
- SharePoint 2007 (https)
- SharePoint 2010 (http)
- SharePoint 2010 (https)
- Vulnerability Assessment Baseline
- Wordpress
partition:
description:
- Device partition to manage resources on.
default: Common
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Import and activate ASM policy
bigip_asm_policy:
name: new_asm_policy
file: /root/asm_policy.xml
active: yes
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Import ASM policy from template
bigip_asm_policy:
name: new_sharepoint_policy
template: SharePoint 2007 (http)
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Create blank ASM policy
bigip_asm_policy:
name: new_blank_policy
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Create blank ASM policy and activate
bigip_asm_policy:
name: new_blank_policy
active: yes
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Activate ASM policy
bigip_asm_policy:
name: inactive_policy
active: yes
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Deactivate ASM policy
bigip_asm_policy:
name: active_policy
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Import and activate ASM policy in Role
bigip_asm_policy:
name: new_asm_policy
file: "{{ role_path }}/files/asm_policy.xml"
active: yes
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Import ASM binary policy
bigip_asm_policy:
name: new_asm_policy
file: "/root/asm_policy.plc"
active: yes
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
active:
description: Set when activating/deactivating ASM policy
returned: changed
type: bool
sample: yes
state:
description: Action performed on the target device.
returned: changed
type: string
sample: absent
file:
description: Local path to ASM policy file.
returned: changed
type: string
sample: /root/some_policy.xml
template:
description: Name of the built-in ASM policy template
returned: changed
type: string
sample: OWA Exchange 2007 (https)
name:
description: Name of the ASM policy to be managed/created
returned: changed
type: string
sample: Asm_APP1_Transparent
'''
import os
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.icontrol import upload_file
from library.module_utils.network.f5.icontrol import tmos_version
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.icontrol import upload_file
from ansible.module_utils.network.f5.icontrol import tmos_version
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
updatables = [
'active',
]
returnables = [
'name',
'template',
'file',
'active',
]
api_attributes = [
'name',
'file',
'active',
]
api_map = {
'filename': 'file',
}
@property
def template_link(self):
if self._values['template_link'] is not None:
return self._values['template_link']
collection = self._templates_from_device()
for resource in collection['items']:
if resource['name'] == self.template.upper():
return dict(link=resource['selfLink'])
return None
@property
def full_path(self):
return fq_name(self.partition, self.name)
def _templates_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/asm/policy-templates/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
class V1Parameters(Parameters):
@property
def template(self):
if self._values['template'] is None:
return None
template_map = {
'ActiveSync v1.0 v2.0 (http)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTP',
'ActiveSync v1.0 v2.0 (https)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTPS',
'LotusDomino 6.5 (http)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP',
'LotusDomino 6.5 (https)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTPS',
'OWA Exchange 2003 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTP',
'OWA Exchange 2003 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTPS',
'OWA Exchange 2003 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTP',
'OWA Exchange 2003 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTPS',
'OWA Exchange 2007 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTP',
'OWA Exchange 2007 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTPS',
'OWA Exchange 2007 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTP',
'OWA Exchange 2007 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTPS',
'OWA Exchange 2010 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTP',
'OWA Exchange 2010 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTPS',
'Oracle 10g Portal (http)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTP',
'Oracle 10g Portal (https)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTPS',
'Oracle Applications 11i (http)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTP',
'Oracle Applications 11i (https)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTPS',
'PeopleSoft Portal 9 (http)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTP',
'PeopleSoft Portal 9 (https)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTPS',
'Rapid Deployment Policy': 'POLICY_TEMPLATE_RAPID_DEPLOYMENT',
'SAP NetWeaver 7 (http)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTP',
'SAP NetWeaver 7 (https)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTPS',
'SharePoint 2003 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTP',
'SharePoint 2003 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTPS',
'SharePoint 2007 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTP',
'SharePoint 2007 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTPS',
'SharePoint 2010 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTP',
'SharePoint 2010 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTPS'
}
if self._values['template'] in template_map:
return template_map[self._values['template']]
else:
raise F5ModuleError(
"The specified template is not valid for this version of BIG-IP."
)
class V2Parameters(Parameters):
@property
def template(self):
if self._values['template'] is None:
return None
template_map = {
'ActiveSync v1.0 v2.0 (http)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTP',
'ActiveSync v1.0 v2.0 (https)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTPS',
'Comprehensive': 'POLICY_TEMPLATE_COMPREHENSIVE', # v13
'Drupal': 'POLICY_TEMPLATE_DRUPAL', # v13
'Fundamental': 'POLICY_TEMPLATE_FUNDAMENTAL', # v13
'Joomla': 'POLICY_TEMPLATE_JOOMLA', # v13
'LotusDomino 6.5 (http)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP',
'LotusDomino 6.5 (https)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTPS',
'OWA Exchange 2003 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTP',
'OWA Exchange 2003 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTPS',
'OWA Exchange 2003 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTP',
'OWA Exchange 2003 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTPS',
'OWA Exchange 2007 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTP',
'OWA Exchange 2007 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTPS',
'OWA Exchange 2007 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTP',
'OWA Exchange 2007 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTPS',
'OWA Exchange 2010 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTP',
'OWA Exchange 2010 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTPS',
'Oracle 10g Portal (http)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTP',
'Oracle 10g Portal (https)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTPS',
'Oracle Applications 11i (http)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTP',
'Oracle Applications 11i (https)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTPS',
'PeopleSoft Portal 9 (http)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTP',
'PeopleSoft Portal 9 (https)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTPS',
'Rapid Deployment Policy': 'POLICY_TEMPLATE_RAPID_DEPLOYMENT',
'SAP NetWeaver 7 (http)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTP',
'SAP NetWeaver 7 (https)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTPS',
'SharePoint 2003 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTP',
'SharePoint 2003 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTPS',
'SharePoint 2007 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTP',
'SharePoint 2007 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTPS',
'SharePoint 2010 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTP',
'SharePoint 2010 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTPS',
'Vulnerability Assessment Baseline': 'POLICY_TEMPLATE_VULNERABILITY_ASSESSMENT', # v13
'Wordpress': 'POLICY_TEMPLATE_WORDPRESS' # v13
}
return template_map[self._values['template']]
class Changes(Parameters):
@property
def template(self):
if self._values['template'] is None:
return None
template_map = {
'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTP': 'ActiveSync v1.0 v2.0 (http)',
'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTPS': 'ActiveSync v1.0 v2.0 (https)',
'POLICY_TEMPLATE_COMPREHENSIVE': 'Comprehensive',
'POLICY_TEMPLATE_DRUPAL': 'Drupal',
'POLICY_TEMPLATE_FUNDAMENTAL': 'Fundamental',
'POLICY_TEMPLATE_JOOMLA': 'Joomla',
'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP': 'LotusDomino 6.5 (http)',
'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTPS': 'LotusDomino 6.5 (https)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTP': 'OWA Exchange 2003 (http)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTPS': 'OWA Exchange 2003 (https)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTP': 'OWA Exchange 2003 with ActiveSync (http)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTPS': 'OWA Exchange 2003 with ActiveSync (https)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTP': 'OWA Exchange 2007 (http)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTPS': 'OWA Exchange 2007 (https)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTP': 'OWA Exchange 2007 with ActiveSync (http)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTPS': 'OWA Exchange 2007 with ActiveSync (https)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTP': 'OWA Exchange 2010 (http)',
'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTPS': 'OWA Exchange 2010 (https)',
'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTP': 'Oracle 10g Portal (http)',
'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTPS': 'Oracle 10g Portal (https)',
'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTP': 'Oracle Applications 11i (http)',
'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTPS': 'Oracle Applications 11i (https)',
'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTP': 'PeopleSoft Portal 9 (http)',
'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTPS': 'PeopleSoft Portal 9 (https)',
'POLICY_TEMPLATE_RAPID_DEPLOYMENT': 'Rapid Deployment Policy',
'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTP': 'SAP NetWeaver 7 (http)',
'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTPS': 'SAP NetWeaver 7 (https)',
'POLICY_TEMPLATE_SHAREPOINT_2003_HTTP': 'SharePoint 2003 (http)',
'POLICY_TEMPLATE_SHAREPOINT_2003_HTTPS': 'SharePoint 2003 (https)',
'POLICY_TEMPLATE_SHAREPOINT_2007_HTTP': 'SharePoint 2007 (http)',
'POLICY_TEMPLATE_SHAREPOINT_2007_HTTPS': 'SharePoint 2007 (https)',
'POLICY_TEMPLATE_SHAREPOINT_2010_HTTP': 'SharePoint 2010 (http)',
'POLICY_TEMPLATE_SHAREPOINT_2010_HTTPS': 'SharePoint 2010 (https)',
'POLICY_TEMPLATE_VULNERABILITY_ASSESSMENT': 'Vulnerability Assessment Baseline',
'POLICY_TEMPLATE_WORDPRESS': 'Wordpress',
}
return template_map[self._values['template']]
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def active(self):
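# Falling through returns None, which _update_changed_options interprets as 'no change needed'.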
if self.want.active is True and self.have.active is False:
return True
if self.want.active is False and self.have.active is True:
return False
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
self.have = None
self.changes = Changes()
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = Changes(params=changed)
return True
return False
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if not self.exists():
return False
else:
return self.remove()
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if any(p['name'] == self.want.name and p['partition'] == self.want.partition for p in response['items']):
return True
return False
def _file_is_missing(self):
if self.want.template and self.want.file is None:
return False
if self.want.template is None and self.want.file is None:
return False
if not os.path.exists(self.want.file):
return True
return False
def create(self):
if self.want.active is None:
self.want.update(dict(active=False))
if self._file_is_missing():
raise F5ModuleError(
"The specified ASM policy file does not exist"
)
self._set_changed_options()
if self.module.check_mode:
return True
if self.want.template is None and self.want.file is None:
self.create_blank()
else:
if self.want.template is not None:
self.create_from_template()
elif self.want.file is not None:
self.create_from_file()
if self.want.active:
self.activate()
return True
else:
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
if self.changes.active:
self.activate()
return True
def activate(self):
self.have = self.read_current_from_device()
task_id = self.apply_on_device()
if self.wait_for_task(task_id, 'apply'):
return True
else:
raise F5ModuleError('Apply policy task failed.')
def wait_for_task(self, task_id, task):
uri = ''
if task == 'apply':
uri = "https://{0}:{1}/mgmt/tm/asm/tasks/apply-policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
task_id
)
elif task == 'import':
uri = "https://{0}:{1}/mgmt/tm/asm/tasks/import-policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
task_id
)
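# Poll the task status endpoint once per second until the task reports COMPLETED or FAILURE.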
while True:
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if response['status'] in ['COMPLETED', 'FAILURE']:
break
time.sleep(1)
if response['status'] == 'FAILURE':
return False
if response['status'] == 'COMPLETED':
return True
def _get_policy_id(self):
name = self.want.name
partition = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
policy_id = next(
(p['id'] for p in response['items'] if p['name'] == name and p['partition'] == partition), None
)
if not policy_id:
raise F5ModuleError("The policy was not found")
return policy_id
def update_on_device(self):
params = self.changes.api_params()
policy_id = self._get_policy_id()
uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id
)
if not params['active']:
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_blank(self):
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError(
'Failed to create ASM policy: {0}'.format(self.want.name)
)
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError(
'Failed to delete ASM policy: {0}'.format(self.want.name)
)
return True
def is_activated(self):
if self.want.active is True:
return True
else:
return False
def read_current_from_device(self):
policy_id = self._get_policy_id()
uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
response.update((dict(self_link=response['selfLink'])))
return Parameters(params=response)
def upload_file_to_device(self, content, name):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
upload_file(self.client, url, content, name)
except F5ModuleError:
raise F5ModuleError(
"Failed to upload the file."
)
def import_to_device(self):
name = os.path.split(self.want.file)[1]
self.upload_file_to_device(self.want.file, name)
time.sleep(2)
full_name = fq_name(self.want.partition, self.want.name)
cmd = 'tmsh load asm policy {0} file /var/config/rest/downloads/{1}'.format(full_name, name)
uri = "https://{0}:{1}/mgmt/tm/util/bash/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "{0}"'.format(cmd)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
if 'commandResult' in response:
if 'Unexpected Error' in response['commandResult']:
raise F5ModuleError(response['commandResult'])
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def remove_temp_policy_from_device(self):
name = os.path.split(self.want.file)[1]
tpath_name = '/var/config/rest/downloads/{0}'.format(name)
uri = "https://{0}:{1}/mgmt/tm/util/unix-rm/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs=tpath_name
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def apply_on_device(self):
uri = "https://{0}:{1}/mgmt/tm/asm/tasks/apply-policy/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
params = dict(policyReference={'link': self.have.self_link})
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['id']
def create_from_template_on_device(self):
full_name = fq_name(self.want.partition, self.want.name)
cmd = 'tmsh create asm policy {0} policy-template {1}'.format(full_name, self.want.template)
uri = "https://{0}:{1}/mgmt/tm/util/bash/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "{0}"'.format(cmd)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
if 'commandResult' in response:
if 'Unexpected Error' in response['commandResult']:
raise F5ModuleError(response['commandResult'])
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
# We need to remove 'active' from params, as the API will raise an error if active is set to True;
# policies can only be activated via the apply-policy task endpoint.
params.pop('active')
uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 401, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
time.sleep(2)
return response['selfLink']
def remove_from_device(self):
policy_id = self._get_policy_id()
uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
policy_id
)
response = self.client.api.delete(uri)
if response.status in [200, 201]:
return True
raise F5ModuleError(response.content)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.kwargs = kwargs
def exec_module(self):
if not module_provisioned(self.client, 'asm'):
raise F5ModuleError(
"ASM must be provisioned to use this module."
)
if self.version_is_less_than_13():
manager = self.get_manager('v1')
else:
manager = self.get_manager('v2')
return manager.exec_module()
def get_manager(self, type):
if type == 'v1':
return V1Manager(**self.kwargs)
elif type == 'v2':
return V2Manager(**self.kwargs)
def version_is_less_than_13(self):
version = tmos_version(self.client)
if LooseVersion(version) < LooseVersion('13.0.0'):
return True
else:
return False
class V1Manager(BaseManager):
def __init__(self, *args, **kwargs):
client = kwargs.get('client', None)
module = kwargs.get('module', None)
super(V1Manager, self).__init__(client=client, module=module)
self.want = V1Parameters(params=module.params, client=client)
def create_from_file(self):
self.import_to_device()
self.remove_temp_policy_from_device()
def create_from_template(self):
self.create_from_template_on_device()
class V2Manager(BaseManager):
def __init__(self, *args, **kwargs):
client = kwargs.get('client', None)
module = kwargs.get('module', None)
super(V2Manager, self).__init__(client=client, module=module)
self.want = V2Parameters(params=module.params, client=client)
def create_from_template(self):
if not self.create_from_template_on_device():
return False
def create_from_file(self):
if not self.import_to_device():
return False
self.remove_temp_policy_from_device()
class ArgumentSpec(object):
def __init__(self):
self.template_map = [
'ActiveSync v1.0 v2.0 (http)',
'ActiveSync v1.0 v2.0 (https)',
'Comprehensive',
'Drupal',
'Fundamental',
'Joomla',
'LotusDomino 6.5 (http)',
'LotusDomino 6.5 (https)',
'OWA Exchange 2003 (http)',
'OWA Exchange 2003 (https)',
'OWA Exchange 2003 with ActiveSync (http)',
'OWA Exchange 2003 with ActiveSync (https)',
'OWA Exchange 2007 (http)',
'OWA Exchange 2007 (https)',
'OWA Exchange 2007 with ActiveSync (http)',
'OWA Exchange 2007 with ActiveSync (https)',
'OWA Exchange 2010 (http)',
'OWA Exchange 2010 (https)',
'Oracle 10g Portal (http)',
'Oracle 10g Portal (https)',
'Oracle Applications 11i (http)',
'Oracle Applications 11i (https)',
'PeopleSoft Portal 9 (http)',
'PeopleSoft Portal 9 (https)',
'Rapid Deployment Policy',
'SAP NetWeaver 7 (http)',
'SAP NetWeaver 7 (https)',
'SharePoint 2003 (http)',
'SharePoint 2003 (https)',
'SharePoint 2007 (http)',
'SharePoint 2007 (https)',
'SharePoint 2010 (http)',
'SharePoint 2010 (https)',
'Vulnerability Assessment Baseline',
'Wordpress',
]
self.supports_check_mode = True
argument_spec = dict(
name=dict(
required=True,
),
file=dict(type='path'),
template=dict(
choices=self.template_map
),
active=dict(
type='bool'
),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=[
['file', 'template']
]
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
veger/ansible
|
lib/ansible/modules/network/f5/_bigip_asm_policy.py
|
Python
|
gpl-3.0
| 38,818
|
[
"Galaxy"
] |
6ebdba6a99097d29fac0c8c0c4619601b0d867d734dccacc428acdd469b48a4d
|
from abc import ABCMeta, abstractmethod
class OpticAxisRule(object):
# return values of neighbor_for_photor and
# neighbor_that_provide_photor should be the
# following:
# [x] is the neighbor at position x
# [x, y] is the neighbor at position y
# of neighbor at position x
# this is commutative so order does not matter
# positions are relative to o.
# The actual directions are given by `_get_unit_axis`
# of `HexagonArray` class
# 1
# 6 2
# o
# 5 3
# 4
# 0 is the current column; the arrangement is based
# on the right eye as seen from inside or the left eye as
# seen from outside.
# 5 and 6 are on the anterior side and 2, 3 on the posterior;
# dorsal/ventral depend on whether it is the left or
# the right eye.
# For the left eye, 1 is dorsal.
# For the right eye, 4 is dorsal.
# See also RFC #3 for the coordinate system for the left eye.
# The right eye is constructed by a rotation in the opposite direction
# of the rotation of the left eye.
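# Worked example: in the neural-superposition left/top arrangement implemented below,
# neighbor_for_photor(3) returns [3, 4], i.e. R3's target cartridge is reached by stepping
# to the neighbor at position 3 and then to that neighbor's neighbor at position 4.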
__metaclass__ = ABCMeta
inds = {'R{}'.format(i+1): i+1 for i in range(6)}
@classmethod
def name_to_ind(cls, name):
try:
return cls.inds[name]
except KeyError:
print('"{}" is not a valid neuron name'.format(name))
raise
@classmethod
def is_photor(cls, name):
return name in cls.inds.keys()
@abstractmethod
def neighbor_for_photor(self, photor_ind):
return
@abstractmethod
def neighbor_that_provide_photor(self, photor_ind):
return
# same as Right Bottom
class OpticAxisNeuralSuperpositionLeftTop(OpticAxisRule):
def __init__(self):
self.name = 'neural superposition'
def neighbor_for_photor(self, photor_ind):
if photor_ind == 1:
neighbor = [2]
elif photor_ind == 2:
neighbor = [3]
elif photor_ind == 3:
neighbor = [3, 4]
elif photor_ind == 4:
neighbor = [4]
elif photor_ind == 5:
neighbor = [5]
elif photor_ind == 6:
neighbor = [6]
else:
raise ValueError('Unexpected neighbor index {}. Expected 1-6.'
.format(photor_ind))
return neighbor
def neighbor_that_provide_photor(self, photor_ind):
# like neighbor_for_photor
if photor_ind == 1:
neighbor = [5]
elif photor_ind == 2:
neighbor = [6]
elif photor_ind == 3:
neighbor = [1, 6]
elif photor_ind == 4:
neighbor = [1]
elif photor_ind == 5:
neighbor = [2]
elif photor_ind == 6:
neighbor = [3]
return neighbor
# same as Right Top
class OpticAxisNeuralSuperpositionLeftBottom(OpticAxisRule):
def __init__(self):
self.name = 'neural superposition'
def neighbor_for_photor(self, photor_ind):
if photor_ind == 1:
neighbor = [3]
elif photor_ind == 2:
neighbor = [2]
elif photor_ind == 3:
neighbor = [1, 2]
elif photor_ind == 4:
neighbor = [1]
elif photor_ind == 5:
neighbor = [6]
elif photor_ind == 6:
neighbor = [5]
else:
raise ValueError('Unexpected neighbor index {}. Expected 1-6.'
.format(photor_ind))
return neighbor
def neighbor_that_provide_photor(self, photor_ind):
# like neighbor_for_photor
if photor_ind == 1:
neighbor = [6]
elif photor_ind == 2:
neighbor = [5]
elif photor_ind == 3:
neighbor = [4, 5]
elif photor_ind == 4:
neighbor = [4]
elif photor_ind == 5:
neighbor = [3]
elif photor_ind == 6:
neighbor = [2]
return neighbor
# same as Left Bottom
class OpticAxisNeuralSuperpositionRightTop(OpticAxisRule):
def __init__(self):
self.name = 'neural superposition'
def neighbor_for_photor(self, photor_ind):
if photor_ind == 1:
neighbor = [3]
elif photor_ind == 2:
neighbor = [2]
elif photor_ind == 3:
neighbor = [1, 2]
elif photor_ind == 4:
neighbor = [1]
elif photor_ind == 5:
neighbor = [6]
elif photor_ind == 6:
neighbor = [5]
else:
raise ValueError('Unexpected neighbor index {}. Expected 1-6.'
.format(photor_ind))
return neighbor
def neighbor_that_provide_photor(self, photor_ind):
# like neighbor_for_photor
if photor_ind == 1:
neighbor = [6]
elif photor_ind == 2:
neighbor = [5]
elif photor_ind == 3:
neighbor = [4, 5]
elif photor_ind == 4:
neighbor = [4]
elif photor_ind == 5:
neighbor = [3]
elif photor_ind == 6:
neighbor = [2]
return neighbor
# same as Left Top
class OpticAxisNeuralSuperpositionRightBottom(OpticAxisRule):
def __init__(self):
self.name = 'neural superposition'
def neighbor_for_photor(self, photor_ind):
if photor_ind == 1:
neighbor = [2]
elif photor_ind == 2:
neighbor = [3]
elif photor_ind == 3:
neighbor = [3, 4]
elif photor_ind == 4:
neighbor = [4]
elif photor_ind == 5:
neighbor = [5]
elif photor_ind == 6:
neighbor = [6]
else:
raise ValueError('Unexpected neighbor index {}. Expected 1-6.'
.format(photor_ind))
return neighbor
def neighbor_that_provide_photor(self, photor_ind):
if photor_ind == 1:
neighbor = [5]
elif photor_ind == 2:
neighbor = [6]
elif photor_ind == 3:
neighbor = [1, 6]
elif photor_ind == 4:
neighbor = [1]
elif photor_ind == 5:
neighbor = [2]
elif photor_ind == 6:
neighbor = [3]
return neighbor
class OpticAxisPlain(OpticAxisRule):
def __init__(self):
self.name = 'plain'
def neighbor_for_photor(self, photor_ind):
return [0]
def neighbor_that_provide_photor(self, photor_ind):
return [0]
class RuleHexArrayMap(object):
'''
A class that assigns columns based on composition rule
in a consistent way.
'''
def __init__(self, rule, hexarray):
# keys are tuples (column_id, photoreceptorname)
neighbors_for_photor = {}
neighbors_that_provide_photor = {}
for el in hexarray.elements:
for neuron, ind in OpticAxisRule.inds.items():
neighbordr = rule.neighbor_for_photor(ind)
neighborid = hexarray.get_neighborid(el.gid, neighbordr)
# While the given selector is already connected to a port,
# search for a selector of a cartridge
# in the opposite direction (relative to the original rule).
# Stop if the selector is the same as the previous one.
# Here we assume the 2 methods of opticaxis
# return cartridge ids in opposite directions and, if
# there is no cartridge in one of those directions, the
# method returns the current cartridge id
# instead of the neighbor's.
# In case the rule connects ommatidia and cartridges
# with the same ids there should be no conflict in the first
# place (the following check is always False).
# Another option is to ignore those connections,
# but unconnected ports cause problems elsewhere.
while (neighborid, neuron) in \
neighbors_that_provide_photor.keys():
neighbordr = rule.neighbor_that_provide_photor(ind)
neighborid_new = hexarray.get_neighborid(neighborid, neighbordr)
if neighborid_new == neighborid:
break
else:
neighborid = neighborid_new
neighbors_for_photor[(el.gid, neuron)] = neighborid
neighbors_that_provide_photor[(neighborid, neuron)] = el.gid
self.neighbors_for_photor = neighbors_for_photor
self.neighbors_that_provide_photor = neighbors_that_provide_photor
def neighbor_for_photor(self, column_id, photor):
return self.neighbors_for_photor[(column_id, photor)]
def neighbor_that_provide_photor(self, column_id, photor):
return self.neighbors_that_provide_photor[(column_id, photor)]
# don't use this directly
# implementation might change
_class_dict = {
'Plain': OpticAxisPlain,
'SuperpositionLT': OpticAxisNeuralSuperpositionLeftTop,
'SuperpositionLB': OpticAxisNeuralSuperpositionLeftBottom,
'SuperpositionRT': OpticAxisNeuralSuperpositionRightTop,
'SuperpositionRB': OpticAxisNeuralSuperpositionRightBottom
}
def opticaxisFactory(rule):
try:
return _class_dict[rule]
except KeyError:
raise ValueError('Value {} not in axis rules {}'
' dictionary'.format(rule, _class_dict.keys()))
def main():
axis = opticaxisFactory('SuperpositionLT')()  # must be a key of _class_dict
neuronname = 'R1'
ind = axis.name_to_ind(neuronname)
print(axis.neighbor_for_photor(ind))
if __name__ == '__main__':
main()
|
neurokernel/retina
|
retina/geometry/opticaxis.py
|
Python
|
bsd-3-clause
| 9,626
|
[
"NEURON"
] |
363b6556607915c87f3620324d32131d4f1966ff1847dc6e9f26e846359f663b
|
from MeshInfo import *
''' Provides Information about ExodusII meshes '''
class ExodusIIMeshInfo(MeshInfo):
def __init__(self, mesh_item_data, file_name):
MeshInfo.__init__(self, mesh_item_data)
self.file_name = file_name
import vtk
reader = vtk.vtkExodusIIReader()
reader.SetFileName(self.file_name)
reader.UpdateInformation()
num_nodesets = reader.GetNumberOfNodeSetArrays()
num_sidesets = reader.GetNumberOfSideSetArrays()
num_blocks = reader.GetNumberOfElementBlockArrays()
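# Each of the sets below stores the numeric object id and, when the entity has a proper
# name (i.e. not 'Unnamed ...'), the first whitespace-separated token of that name as well.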
self.nodesets = set()
for i in xrange(num_nodesets):
self.nodesets.add(reader.GetObjectId(vtk.vtkExodusIIReader.NODE_SET,i))
if 'Unnamed' not in reader.GetObjectName(vtk.vtkExodusIIReader.NODE_SET,i).split(' '):
self.nodesets.add(reader.GetObjectName(vtk.vtkExodusIIReader.NODE_SET,i).split(' ')[0])
self.sidesets = set()
for i in xrange(num_sidesets):
self.sidesets.add(reader.GetObjectId(vtk.vtkExodusIIReader.SIDE_SET,i))
if 'Unnamed' not in reader.GetObjectName(vtk.vtkExodusIIReader.SIDE_SET,i).split(' '):
self.sidesets.add(reader.GetObjectName(vtk.vtkExodusIIReader.SIDE_SET,i).split(' ')[0])
self.blocks = set()
for i in xrange(num_blocks):
self.blocks.add(reader.GetObjectId(vtk.vtkExodusIIReader.ELEM_BLOCK,i))
if 'Unnamed' not in reader.GetObjectName(vtk.vtkExodusIIReader.ELEM_BLOCK,i).split(' '):
self.blocks.add(reader.GetObjectName(vtk.vtkExodusIIReader.ELEM_BLOCK,i).split(' ')[0])
def blockNames(self):
return self.blocks
def sidesetNames(self):
return self.sidesets
def nodesetNames(self):
return self.nodesets
|
gleicher27/Tardigrade
|
moose/gui/mesh_info/ExodusIIMeshInfo.py
|
Python
|
lgpl-2.1
| 1,654
|
[
"VTK"
] |
d9a685d66923eece0fe6d26575b32d3a6dffc509e05bae81d2a242c3e993cad0
|
#! /usr/bin/env python
from MDAnalysis import *
import numpy
import math
import sys
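# Usage sketch: python dist_pep_mhcii.py <trajectory_file>
# Assumes an 'init.pdb' topology in the working directory; for every frame the
# center-of-mass distances between each helix (segids A, B) and the peptide (segid C)
# are written to pepdist.dat.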
my_traj = sys.argv[1]
u = Universe("init.pdb",my_traj)
v = Universe("init.pdb")
end = my_traj.find('.pdb')
fout_name = 'pepdist.dat'
# helices
a = u.selectAtoms("segid A and resid 50:73")
b = u.selectAtoms("segid B and resid 56:86")
# peptide
c = u.selectAtoms("segid C")
f = open(fout_name,'w')
for ts in u.trajectory:
distance1 = numpy.linalg.norm(a.centerOfMass() - c.centerOfMass())
distance2 = numpy.linalg.norm(b.centerOfMass() - c.centerOfMass())
f.write('%7.3f %7.3f\n' % (distance1,distance2))
f.close()
|
demharters/git_scripts
|
dist_pep_mhcii.py
|
Python
|
apache-2.0
| 629
|
[
"MDAnalysis"
] |
1ec3a469fc089322e0438a71175f6d5facffc627aa07c2b61b60fc0939643ae9
|
"""Tests for the CheckMigration Operation"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import pytest
from mock import MagicMock
from DIRAC import S_OK
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.DataManagementSystem.Agent.RequestOperations import CheckMigration
MODULE = 'DIRAC.DataManagementSystem.Agent.RequestOperations.CheckMigration'
FILE_NAME = 'fileName'
N_FILES = 3
@pytest.fixture
def listOfLFNs():
lfns = []
for index, name in enumerate([FILE_NAME] * N_FILES):
lfns.append('/vo/%s_%d' % (name, index))
return lfns
@pytest.fixture
def seMock(mocker):
"""Mock call to StorageElement."""
seModMock = mocker.MagicMock(name='StorageElementModule')
seClassMock = mocker.MagicMock(name='StorageElementClass')
seClassMock.getFileMetadata = mocker.MagicMock(return_value=S_OK({'Migrated': 0}))
seModMock.return_value = seClassMock
mocker.patch(MODULE + '.StorageElement', new=seModMock)
return seModMock, seClassMock
@pytest.fixture
def checkRequestAndOp(listOfLFNs):
req = Request()
req.RequestName = 'MyRequest'
op = Operation()
op.Type = 'CheckMigration'
for index, lfn in enumerate(listOfLFNs):
oFile = File()
oFile.LFN = lfn
oFile.Size = index
oFile.Checksum = '01130a%0d' % index
oFile.ChecksumType = 'adler32'
op.addFile(oFile)
req.addOperation(op)
return req, op
@pytest.fixture
def multiRetVal(listOfLFNs):
"""Return a return structure for multiple values"""
def retFunc(*args, **kwargs):
retVal = {'OK': True, 'Value':
{'Failed': {},
'Successful': {},
}}
for lfn in listOfLFNs:
if kwargs.get('OK', not kwargs.get('Error', False)):
retVal['Value']['Successful'][lfn] = {'Migrated': kwargs.get('Migrated', 0)}
else:
retVal['Value']['Failed'][lfn] = kwargs.get('Error', 'Failed to do X')
return retVal
return retFunc
@pytest.fixture
def checkMigration(mocker, checkRequestAndOp):
cm = CheckMigration.CheckMigration(checkRequestAndOp[1])
return cm
def test_constructor(checkMigration):
assert checkMigration.waitingFiles == []
def test_run_NotMigrated(checkMigration, seMock, multiRetVal):
seModMock, seClassMock = seMock
seClassMock.getFileMetadata = MagicMock(side_effect=functools.partial(multiRetVal, Migrated=0))
checkMigration._run()
assert len(checkMigration.waitingFiles) == N_FILES
seModMock.assert_called_with('')
for opFile in checkMigration.operation:
assert opFile.Status == 'Waiting'
def test_run_Migrated(checkMigration, seMock, multiRetVal):
seModMock, seClassMock = seMock
seClassMock.getFileMetadata = MagicMock(side_effect=functools.partial(multiRetVal, Migrated=1))
checkMigration._run()
assert len(checkMigration.waitingFiles) == N_FILES
for opFile in checkMigration.operation:
assert opFile.Status == 'Done'
def test_run_Failed(checkMigration, seMock, multiRetVal):
seModMock, seClassMock = seMock
seClassMock.getFileMetadata = MagicMock(side_effect=functools.partial(multiRetVal, Error='Fail Fail Fail'))
checkMigration._run()
assert len(checkMigration.waitingFiles) == N_FILES
for opFile in checkMigration.operation:
assert opFile.Status == 'Waiting'
def test_call_Migrated(checkMigration, seMock, multiRetVal):
seModMock, seClassMock = seMock
seClassMock.getFileMetadata = MagicMock(side_effect=functools.partial(multiRetVal, Migrated=1))
assert checkMigration()['OK']
assert len(checkMigration.waitingFiles) == N_FILES
for opFile in checkMigration.operation:
assert opFile.Status == 'Done'
def test_call_Exception(checkMigration, seMock, multiRetVal):
seModMock, seClassMock = seMock
seClassMock.getFileMetadata = MagicMock(side_effect=RuntimeError('Throw Down'))
ret = checkMigration()
assert not ret['OK']
assert ret['Message'] == 'Throw Down'
assert len(checkMigration.waitingFiles) == N_FILES
for opFile in checkMigration.operation:
assert opFile.Status == 'Waiting'
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/Agent/RequestOperations/test/Test_CheckMigration.py
|
Python
|
gpl-3.0
| 4,209
|
[
"DIRAC"
] |
1a6f54ab02d4f0403e6cd3ef94659f7e2114e9c94306f8a9ccab21f2b863eb02
|
"""
Bond order information.
"""
CCDC_BOND_ORDERS = {
# http://cima.chem.usyd.edu.au:8080/cif/skunkworks/html/ddl1/mif/bond.html
'S': 1.0, # single (two-electron) bond or sigma bond to metal
'D': 2.0, # double (four-electron) bond
'T': 3.0, # triple (six-electron) bond
'Q': 4.0, # quadruple (eight-electron, metal-metal) bond
'A': 1.5, # alternating normalized ring bond (aromatic)
'C': 1.0, # catena-forming bond in crystal structure
'E': 1.5, # equivalent (delocalized double) bond
'P': 1.0, # pi bond (metal-ligand pi interaction)
'Am': 1.41, # Amide bond (non standard)
1.0: 'S', # single (two-electron) bond or sigma bond to metal
2.0: 'D', # double (four-electron) bond
3.0: 'T', # triple (six-electron) bond
4.0: 'Q', # quadruple (eight-electron, metal-metal) bond
1.5: 'A', # alternating normalized ring bond (aromatic)
1.41: 'Am' # Amide bond (non standard)
}
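# Note that the mapping is intentionally bidirectional (e.g. CCDC_BOND_ORDERS['D'] == 2.0
# and CCDC_BOND_ORDERS[2.0] == 'D'); 'C', 'E' and 'P' have no reverse entries because their
# numeric values collide with 'S' and 'A'.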
|
peteboyd/lammps_interface
|
lammps_interface/ccdc.py
|
Python
|
mit
| 949
|
[
"CRYSTAL"
] |
08631d43443fe3932109fa97ec891ab6aebad1a0c766ae9a53eb2ab6321db688
|
import logging
import urllib2
import urllib
import re
import os
import time
from pprint import pprint
from stringcookiejar import StringCookieJar
headers = {
'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru; rv:1.9.0.'
'13) Gecko/2009073022 Firefox/3.0.13 (.NET CLR 3.5.30729)',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;'
'q=0.8',
'Accept-Language': 'ru,en-us;q=0.7,en;q=0.3',
'Accept-Charset': 'windows-1251,utf-8;q=1,*;q=0',
'Keep-Alive': '300',
'Connection': 'keep-alive',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache',
'Content-Type': 'application/x-www-form-urlencoded',
}
class VKError(Exception):
pass
class VKConnector:
"""Connector to vkontakte web site.
Fetches pages from the vkontakte web site and
automatically logs in again if the session has timed out.
"""
def __init__(self, email=None, password=None, cookiestring=''):
self.email = email
self.password = password
self.cookiejar = StringCookieJar(cookiestring)
self.maxreloads = 2
self.sleeptime = 2
self.vkhost = 'http://vkontakte.ru'
self.vkloginhost = 'http://login.vk.com'
handler = urllib2.HTTPCookieProcessor(self.cookiejar)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
def get_page(self, path):
"""Get page from vkontakte web site.
path -- absolute path to page with slash at the beginning.
"""
for i in xrange(self.maxreloads):
# sleep to not be banned
time.sleep(self.sleeptime)
data = None
req = urllib2.Request(self.vkhost + path, data, headers)
handle = urllib2.urlopen(req)
text = handle.read()
if len(text) < 1100:
# session timed out
self.relogin()
else:
text = text.decode('cp1251')
return text
raise VKError('Could not get page from %s. Please, try again later.',
self.vkhost)
def relogin(self):
"""Refresh session. Email and password are not needed."""
# 1.get s-value
data = None
req = urllib2.Request(self.vkloginhost + '/?vk=', data, headers)
handle = urllib2.urlopen(req)
text = handle.read()
match = re.search(r"action='(.*?)'.*?name='s' id='s' value='(.*?)'",
text, re.DOTALL)
if match:
# action and svalue should be something like
# "http://vk.com/login.php?op=slogin&nonenone=1"
# and "nonenone"
action, svalue = match.group(1), match.group(2)
else:
raise VKError("s-value not returned.")
# 2.submitting s-value
data = urllib.urlencode({'s': svalue})
req = urllib2.Request(action, data, headers)
handle = urllib2.urlopen(req)
# text should be "<html></html>"
text = handle.read()
# if valid s-value not returned we should login again
if svalue == 'nonenone':
self.login()
# else, remixsid cookie have been received and
# any page can be accessed
else:
return
def get_cookie(self):
"""Get cookie in text format.
Can be saved and then passed to VKConnector constructor.
"""
return self.cookiejar.save()
def login(self):
"""Log in to vkontakte web site."""
# 1.visit index page
data = None
req = urllib2.Request(self.vkhost + '/index.php', data, headers)
handle = urllib2.urlopen(req)
text = handle.read()
# 2.get s-form from http://login.vk.com/?vk=1
data = None
req = urllib2.Request(self.vkloginhost + '/?vk=', data, headers)
handle = urllib2.urlopen(req)
text = handle.read()
# 3.submit this form (send s-value)
data = urllib.urlencode({'s': 'nonenone'})
req = urllib2.Request(self.vkhost + '/login.php?op=slogin&nonenone=1',
data, headers)
handle = urllib2.urlopen(req)
# text should be "<html></html>"
text = handle.read()
# 4. press 'submit' buttom in form first form
data = urllib.urlencode({'op': 'a_login_attempt'})
req = urllib2.Request(self.vkhost + '/login.php', data, headers)
handle = urllib2.urlopen(req)
# text should be 'vklogin' ?
text = handle.read()
# 5.actually submit login form. Get s-value.
data = urllib.urlencode({'email': self.email, 'pass': self.password,
'expire': '', 'vk': ''})
req = urllib2.Request(self.vkloginhost + '/?act=login', data, headers)
handle = urllib2.urlopen(req)
text = handle.read()
match = re.search(r"name='s' value='(.*?)'", text)
if match:
svalue = match.group(1)
else:
raise VKError("s-value not returned.")
#6. send s-value. receive "remixsid" cookie
data = urllib.urlencode({'op': 'slogin', 'redirect': '1',
'expire': '0', 'to': '', 's': svalue})
req = urllib2.Request(self.vkhost + '/login.php', data, headers)
handle = urllib2.urlopen(req)
# text should be index page content
text = handle.read()
# "remixsid" received?
if not filter(lambda c: c.domain == '.vkontakte.ru' and
c.name == 'remixsid' and c.value != 'nonenone',
self.cookiejar):
raise VKError('remixsid is not returned')
class DummyVKConnector:
"""Emulator of connector to vkontakte site.
Implements VKConnector interface.
Acts like the real VKConnector, but reads pages from files in a local
test directory instead of the real web site, or returns self.text if it is set.
Useful for testing.
"""
def __init__(self, email=None, password=None, cookiestring=''):
self.cookiestring = cookiestring
# get_page() checks self.text first, so make sure the attribute always exists.
self.text = None
self.path = "file://" + os.path.join(os.path.dirname(__file__),
"testdata")
self.vkhost = 'http://vkontakte.ru'
def get_page(self, path):
if self.text:
return self.text
host = '%s%s' % (self.path, path)
conn = urllib2.Request(host, None, {})
text = urllib2.urlopen(conn).read().decode('UTF-8')
return text
def get_cookie(self):
return self.cookiestring
def login(self):
pass
def relogin(self):
pass
|
trashgenerator/vkontakte-spy
|
vkontakte_spy/connectors.py
|
Python
|
bsd-3-clause
| 6,677
|
[
"VisIt"
] |
d80b49b4c77d5762a496ba8401a327b3c0e16e29028c177dbb2f88b5e331bac9
|
# -*- coding: utf-8 -*-
#
# Moonstone is platform for processing of medical images (DICOM).
# Copyright (C) 2009-2011 by Neppo Tecnologia da Informação LTDA
# and Aevum Softwares LTDA
#
# This file is part of Moonstone.
#
# Moonstone is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import logging
import optparse
if sys.hexversion < 0x02070000:
print("This script requires Python 2.7 or later.")
print("Currently run with version: {0}".format(sys.version))
print("Please install it. The source for Python can be found at: " \
"http://www.python.org/.")
sys.exit(-1)
try:
from PyQt4 import QtCore, QtGui, QtOpenGL
except ImportError, e:
print("This script requires PyQt4")
print("Please install it. The source for PyQt4 can be found at: " \
"http://www.PyQt4.org")
print("Error: {0}".format(e))
sys.exit(-1)
try:
import gdcm
gdcm.Trace.DebugOff()
gdcm.Trace.WarningOff()
gdcm.Trace.ErrorOff()
except ImportError, e:
print("This script requires GDCM 2.x.x")
print("Please install it. The source for GDCM can be found at: " \
"http://gdcm.sourceforge.net/.")
print("Error: {0}".format(e))
sys.exit(-1)
if gdcm.Version.GetMajorVersion() != 2:
print("This script requires GDCM 2.x.x")
print("Currently run with version: {0}".format(gdcm.Version.GetVersion()))
print("Please upgrade your GDCM library. The " \
"source for GDCM can be found at: http://gdcm.sourceforge.net/.")
sys.exit(-1)
try:
import vtk
vtk.vtkObject.GlobalWarningDisplayOff()
except ImportError, e:
print("This script requires VTK 5.8.x or later")
print("Please install it. The source for VTK can be found at: " \
"http://www.vtk.org/.")
print("Error: {0}".format(e))
sys.exit(-1)
if vtk.vtkVersion.GetVTKMajorVersion() != 5 \
or vtk.vtkVersion.GetVTKMinorVersion() < 8:
print("This script requires VTK 5.8.x or later")
print("Currently run with version: {0}" \
.format(vtk.vtkVersion.GetVTKVersion()))
print("Please upgrade your VTK library. The " \
"source for VTK can be found at: http://www.vtk.org/.")
sys.exit(-1)
try:
import vtkgdcm
except ImportError, e:
print("This script requires GDCM 2.x.x with vtk")
print("Please install it. The source for GDCM can be found at: " \
"http://gdcm.sourceforge.net/.")
print("Error: {0}".format(e))
sys.exit(-1)
from .utils import constant as constant
from .utils import debuger as debuger
from .utils import logger as logger
from .utils import configure as configure
from .utils.versionconverter import convertToActualVersion
from .utils import i18n as i18n
from bloodstone.importer.database.dbutils import DBUtils
class Moonstone(QtGui.QApplication):
def __init__(self, args):
logging.debug("In Moonstone::__init__()")
super(Moonstone, self).__init__(args)
self.startDragTime()
self.startDragDistance()
# TODO: BUG PyQt4! Remove this impl!
#self.locale()
logging.debug(":: qt_locale")
language = str(QtCore.QLocale.system().name())
localedir = i18n.qt_locale_dir(language)
localefilename = i18n.qt_locale_filename()
translator = QtCore.QTranslator()
trans = translator.load(localefilename, localedir)
if trans:
logging.debug(":: Install Qt traslator")
logging.debug("++ filename: {0}".format(localefilename))
logging.debug("++ localdir: {0}".format(localedir))
QtCore.QCoreApplication.installTranslator(translator)
else:
logging.warning("Could not load the file internationalization: {0}"\
.format(localefilename))
self.showSplashScreen()
align = QtCore.Qt.AlignTop | QtCore.Qt.AlignRight
color = QtGui.QColor()
color.setRgb(240, 240, 216)
self.splash.showMessage(QtGui.QApplication.translate(
"MainWindow", "Setting up the main window...", None,
QtGui.QApplication.UnicodeUTF8), align, color)
self.processEvents()
self.splash.showMessage(QtGui.QApplication.translate(
"MainWindow", "Configure moonstone...", None,
QtGui.QApplication.UnicodeUTF8), align, color)
self.configure()
self.processEvents()
self.splash.showMessage(QtGui.QApplication.translate(
"MainWindow", "Checking database...", None,
QtGui.QApplication.UnicodeUTF8), align, color)
self.checkDatabase()
self.processEvents()
self.verifyVersion()
self.splash.showMessage(QtGui.QApplication.translate(
"MainWindow", "Show main window...", None,
QtGui.QApplication.UnicodeUTF8), align, color)
self.showMainWindow()
if not QtGui.QSystemTrayIcon.isSystemTrayAvailable():
self.connect(self, QtCore.SIGNAL("lastWindowClosed()"),
self, QtCore.SLOT("quit()"))
self.hideSplashScreen()
def showSplashScreen(self):
logging.debug("In Moonstone::showSplashScreen()")
from .gui.qt.widget import resources_rc
self.splash = QtGui.QSplashScreen()
self.splash.setPixmap(QtGui.QPixmap(
":/static/default/splashscreen/splashscreen.png"))
self.splash.show()
def hideSplashScreen(self):
logging.debug("In Moonstone::hideSplashScreen()")
self.splash.clearMessage()
self.splash.finish(self.mainWindow)
if self.splash:
del self.splash
def configure(self):
logging.debug("In Moonstone::configure()")
configure.qt_coding()
configure.init_resources()
def locale(self):
logging.debug("In Moonstone::locale()")
i18n.gettext_locale()
#i18n.qt_locale()
def showMainWindow(self):
logging.debug("In Moonstone::showMainWindow()")
from .gui.qt.mainwindow import MainWindow
import time
time.sleep(2)
self.mainWindow = MainWindow("main")
#self.mainWindow.createImportWindow()
self.mainWindow.showMaximized()
def checkDatabase(self):
logging.debug("In Moonstone::checkDataBase()")
dbUtil = DBUtils()
dbUtil.createConnection()
def verifyVersion(self):
convertToActualVersion()
#
#
#
#if __name__ in ("__main__", "moonstone.main"):
# Moonstone(sys.argv)
|
zoulianmp/moonstone-pyqt
|
moonstone/main.py
|
Python
|
lgpl-3.0
| 7,136
|
[
"VTK"
] |
eef42c86c708029a248fd7d536e4b8f7084ccc293fa044274b864e49101cd605
|
#!/usr/bin/env python
# Written by Rose A. Finn, Nov 9, 2013
#
'''
goal is to match Luc Simard's B/D GIM2D catalog with my NSA catalogs.
Simard catalog is in research/SimardSDSS2011/table1and3.fits
this has 108 columns!!!
I would like to figure out a way to do this without having to write out every column
individually. Will sleep on it and look at this again in the morning.
G'night
------------------------------------------------------------
catalog description
------------------------------------------------------------
from http://vizier.cfa.harvard.edu/viz-bin/Cat?J/ApJS/196/11
------------------------------------------------------------
Byte-by-byte Description of file: table[12].dat
Bytes Format Units Label Explanations
1- 18 A18 --- objID SDSS "objID" object identifier (run, rerun,
camcol, field, object)
20- 29 F10.6 --- z ?=-99.99 SDSS redshift (spectroscopic if
available; or photometric ("-1" in "Sp")
31- 32 I2 --- Sp [-2/6] SDSS SpecClass value (G1)
34- 40 F7.3 kpc/arcsec Scale ?=-99.99 Physical scale at redshift z
42- 51 F10.3 Mpc+3 Vmax ?=-99.99 Galaxy volume correction (Eq. 7)
53- 58 F6.2 mag gg2d ?=-99.99 GIM2D B+D model g-band magnitude;
(Eq. 1)
60- 65 F6.2 mag e_gg2d ?=-99.99 Uncertainty in gg2d
67- 72 F6.2 mag rg2d ?=-99.99 GIM2D B+D model r-band magnitude;
(Eq. 1)
74- 79 F6.2 mag e_rg2d ?=-99.99 Uncertainty in rg2d
81- 86 F6.2 mag gg2df ?=-99.99 B+D model g-band fiber magnitude
88- 93 F6.2 mag rg2df ?=-99.99 B+D model r-band fiber magnitude
95-100 F6.2 mag dCol [-4.60,+3.83]?=-99.99 Delta fiber color (G2)
102-107 F6.2 --- (B/T)g [0,1]?=-99.99 g-band bulge fraction
109-114 F6.2 --- e_(B/T)g [0,1]?=-99.99 Uncertainty in (B/T)g
116-121 F6.2 --- (B/T)r [0,1]?=-99.99 r-band bulge fraction
123-128 F6.2 --- e_(B/T)r [0,1]?=-99.99 Uncertainty in (B/T)r
130-135 F6.2 --- (B/T)gf ?=-99.99 g-band fiber bulge fraction
137-142 F6.2 --- (B/T)rf ?=-99.99 r-band fiber bulge fraction
144-150 F7.2 kpc Rhlg [0,1488]?=-99.99 g-band galaxy semi-major
axis, half-light radius
152-158 F7.2 kpc Rhlr [0,1488]?=-99.99 r-band galaxy semi-major
axis, half-light radius
160-166 F7.2 kpc Rchl,g [-336,308] g-band galaxy circular
half-light radius
168-174 F7.2 kpc Rchl,r [-336,287] r-band galaxy circular
half-light radius
176-181 F6.2 kpc Re ?=-99.99 Bulge semi-major effective radius
183-188 F6.2 kpc e_Re ?=-99.99 Uncertainty in Re
190-195 F6.2 --- e [0,1]?=-99.99 Bulge ellipticity (G3)
197-202 F6.2 --- e_e [0,1]?=-99.99 Uncertainty in e
204-210 F7.2 deg phib [-360,360] Bulge position angle (G4)
212-217 F6.2 deg e_phib ?=-99.99 Uncertainty in phib
219-224 F6.2 kpc Rd ?=-99.99 Exponential disk scale length
226-231 F6.2 kpc e_Rd ?=-99.99 Uncertainty in Rd
233-238 F6.2 deg i ?=-99.99 Disk inclination angle (1)
240-245 F6.2 deg e_i ?=-99.99 Uncertainty in i
247-253 F7.2 deg phid [-360,360] Disk position angle (G4)
255-260 F6.2 deg e_phid ?=-99.99 Uncertainty in phid
262-267 F6.2 arcsec (dx)g ?=-99.99 g-band B+D model center X offset
(G5)
269-274 F6.2 arcsec e_(dx)g ?=-99.99 Uncertainty in (dx)g
276-281 F6.2 arcsec (dy)g ?=-99.99 g-band B+D model center Y offset
(G5)
283-288 F6.2 arcsec e_(dy)g ?=-99.99 Uncertainty in (dy)g
290-295 F6.2 arcsec (dx)r ?=-99.99 r-band B+D model center X offset
(G5)
297-302 F6.2 arcsec e_(dx)r ?=-99.99 Uncertainty in (dx)r
304-309 F6.2 arcsec (dy)r ?=-99.99 r-band B+D model center Y offset
(G5)
311-316 F6.2 arcsec e_(dy)r ?=-99.99 Uncertainty in (dy)r
318-323 F6.2 --- S2g ?=-99.99 g-band image smoothness parameter
(G6)
325-330 F6.2 --- S2r ?=-99.99 r-band image smoothness parameter
(G6)
332-337 F6.2 mag ggMag ?=-99.99 Absolute, rest-frame g-band GIM2D
galaxy magnitude (Eq. 3a)
339-344 F6.2 mag e_ggMag ?=-99.99 Uncertainty in ggMag
346-351 F6.2 mag gbMag ?=-99.99 Absolute, rest-frame g-band GIM2D
bulge magnitude (Eq. 3c)
353-358 F6.2 mag e_gbMag ?=-99.99 Uncertainty in gbMag
360-365 F6.2 mag gdMag ?=-99.99 Absolute, rest-frame g-band GIM2D
disk magnitude (Eq. 3e)
367-372 F6.2 mag e_gdMag ?=-99.99 Uncertainty in gdMag
374-379 F6.2 mag rgMag ?=-99.99 Absolute, rest-frame r-band GIM2D
galaxy magnitude (Eq. 3b)
381-386 F6.2 mag e_rgMag ?=-99.99 Uncertainty in rgMag
388-393 F6.2 mag rbMag ?=-99.99 Absolute, rest-frame r-band GIM2D
bulge magnitude (Eq. 3d)
395-400 F6.2 mag e_rbMag ?=-99.99 Uncertainty in rbMag
402-407 F6.2 mag rdMag ?=-99.99 Absolute, rest-frame r-band GIM2D
disk magnitude (Eq. 3f)
409-414 F6.2 mag e_rdMag ?=-99.99 Uncertainty in rdMag
416-421 F6.2 --- nb [0,8]?=-99.99 Bulge Sersic index (4.00 for
table 1)
423-428 F6.2 --- e_nb [0,4]?=-99.99 Uncertainty in nb (0.00 for
table 1)
430-435 F6.2 --- PpS [0,1]?=-99.99 F-test probability (2)
439-442 F4.2 --- Pn4 [0,1]? F-test probability (only for
table 2) (3)
Note (1): i=0 for face-on disk.
Note (2): That a B+D model is not required compared to a pure Sersic model.
Note (3): That a free nb B+D model is not required compared to a fixed
nb=4 B+D model.
Byte-by-byte Description of file: table3.dat
Bytes Format Units Label Explanations
1- 18 A18 --- objID SDSS object identifier
20- 29 F10.6 --- z ?=-99.99 SDSS redshift (spectroscopic if
available or photometric ("-1" in "Sp")
31- 32 I2 --- Sp [-2/6] SDSS SpecClass value (G1)
36- 42 F7.3 kpc/arcsec Scale ?=-99.99 Physical scale at redshift z
44- 53 F10.3 Mpc+3 Vmax ?=-99.99 Galaxy volume correction (Eq. 7)
55- 60 F6.2 mag gg2d ?=-99.99 GIM2D pure Sersic model g-band
magnitude
62- 67 F6.2 mag e_gg2d ?=-99.99 Uncertainty in gg2d
69- 74 F6.2 mag rg2d ?=-99.99 GIM2D pure Sersic model r-band
magnitude
76- 81 F6.2 mag e_rg2d ?=-99.99 Uncertainty in rg2d
83- 88 F6.2 mag gg2df ?=-99.99 GIM2D pure Sersic model g-band
fiber magnitude
90- 95 F6.2 mag rg2df ?=-99.99 GIM2D pure Sersic model r-band
fiber magnitude
97-102 F6.2 mag dCol [-4.95,+5.73]?=-99.99 Delta fiber color (G2)
104-109 F6.2 kpc Rhlg ?=-99.99 g-band galaxy semi-major axis,
half-light radius
111-116 F6.2 kpc Rhlr ?=-99.99 r-band galaxy semi-major axis,
half-light radius
118-124 F7.2 kpc Rchl,g [-336,284] g-band galaxy circular half-light
radius
126-132 F7.2 kpc Rchl,r [-336,284] r-band galaxy circular half-light
radius
134-139 F6.2 --- e ?=-99.99 Galaxy ellipticity (G3)
141-146 F6.2 --- e_e ?=-99.99 Uncertainty in e
148-154 F7.2 deg phi [-360,360] Galaxy position angle (G4)
156-161 F6.2 deg e_phi ?=-99.99 Uncertainty in phi
163-168 F6.2 arcsec (dx)g [-42,32]?=-99.99 g-band pure Sersic model
center X offset (G5)
170-175 F6.2 arcsec e_(dx)g ?=-99.99 Uncertainty in (dx)g
177-182 F6.2 arcsec (dy)g [-38,30]?=-99.99 g-band pure Sersic model
center Y offset (G5)
184-189 F6.2 arcsec e_(dy)g ?=-99.99 Uncertainty in (dy)g
191-196 F6.2 arcsec (dx)r [-31,30]?=-99.99 r-band pure Sersic model
center X offset (G5)
198-203 F6.2 arcsec e_(dx)r ?=-99.99 Uncertainty in (dx)r
205-210 F6.2 arcsec (dy)r [-38,30]?=-99.99 r-band pure Sersic model
center Y offset (G5)
212-217 F6.2 arcsec e_(dy)r ?=-99.99 Uncertainty in (dy)r
219-224 F6.2 --- S2g [-89,98]?=-99.99 g-band image smoothness
parameter (G6)
226-231 F6.2 --- S2r [-75,99]?=-99.99 r-band image smoothness
parameter (G6)
233-238 F6.2 mag ggMag [-37,7]?=-99.99 Absolute, rest-frame g-band
GIM2D galaxy magnitude (Eq. 3a)
240-245 F6.2 mag e_ggMag ?=-99.99 Uncertainty in ggMag
247-252 F6.2 mag rgMag [-36,6]?=-99.99 Absolute, rest-frame r-band
GIM2D galaxy magnitude (Eq. 3b)
254-259 F6.2 mag e_rgMag ?=-99.99 Uncertainty in rgMag
261-266 F6.2 --- ng [0,8]?=-99.99 Galaxy Sersic index
268-273 F6.2 --- e_ng ?=-99.99 Uncertainty in ng
Global notes:
Note (G1): Flags as follows:
-1 = photometric;
-2 = no redshift available
0 = unknown: spectrum not classifiable (zConf<0.25)
1 = star
2 = galaxy
3 = QSO
4 = high-redshift quasar, z>2.3
5 = Spectrum of blank sky.
6 = STAR_LATE: Star dominated by molecular bands M or later.
Note (G2): Delta fiber color defined as: (g-r)gim2d,fiber-(g-r)SDSS,fiber.
Note (G3): e=1-b/a, e=0 for a circular bulge.
Note (G4): Measured clockwise from the +y axis of SDSS images.
Note (G5): From Column|Row position given by colc(g|r)|rowc(g|r) on SDSS
corrected image.
Note (G6): As defined in Simard et al. (2009A&A...508.1141S).
'''
import atpy, os
from pylab import *
from LCScommon import *
mypath=os.getcwd()
from LCSReadmasterBaseNSA import *
def findnearest(x1,y1,x2,y2,delta):#use where command
matchflag=1
nmatch=0
d=sqrt((x1-x2)**2 + (y1-y2)**2)#x2 and y2 are arrays
index=arange(len(d))
t=index[d<delta]
matches=t
if len(matches) > 0:
nmatch=len(matches)
if nmatch > 1:
imatch=index[(d == min(d[t]))]
else:
imatch=matches[0]
else:
imatch = 0
matchflag = 0
return imatch, matchflag,nmatch
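# Hedged illustration of findnearest (the numbers below are made up for the
# sake of the example): given one target position and arrays of candidate
# positions, it returns the index of the closest candidate within delta, a
# match flag and the number of candidates inside delta.
#
#   imatch, matchflag, nmatch = findnearest(150.0, 2.0,
#       array([149.9999, 150.3]), array([2.0001, 2.1]), 3./3600.)
#   # -> imatch == 0, matchflag == 1, nmatch == 1
#   # matchflag == 0 (with imatch == 0) means no candidate fell inside delta.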
class cluster(baseClusterNSA):
def __init__(self,clustername):
baseClusterNSA.__init__(self,clustername)
self.readsdsscsv()
#Get current path so program can tell if this is being run on Becky or Rose's computer
def readsdsscsv(self):
infile=homedir+'research/LocalClusters/NSAmastertables/SDSSTables/'+self.prefix+'_SDSS_dr7.csv'
scat=atpy.Table(infile,type='ascii',data_start=1)
self.sdss_run=scat.col3
self.sdss_rerun=scat.col4
self.sdss_camcol=scat.col5
self.sdss_field=scat.col6
self.sdss_rowc=scat.col17
self.sdss_objid=scat.col16
self.sdss_colc=scat.col15
self.sdss_r = scat.col10
self.sdss_ra=scat.col1
self.sdss_dec=scat.col2
self.useraflag=0
if len(self.sdss_ra) != len(self.ra):
print 'WARNING: problem with sdss cat for ',self.prefix
self.useraflag=1
def match2zoo(self,delta):
#self.zoo_objid=[]
i_match=zeros(len(self.ra),'i')
i_match_flag=zeros(len(self.ra),'i')
z=zeros(len(self.ra),'i')
for i in range(len(self.ra)):
if self.useraflag:
imatch,matchflag,nmatch=findnearest(self.ra[i],self.dec[i],bd.zdat._RA,bd.zdat._DE,delta)
if matchflag:
#print i,nmatch, self.n.ISDSS[i],' found match to simard B/D sample'
#print self.sdss_objid[i],bd.zdat.objID_1[imatch]
i_match[i]=imatch
i_match_flag[i]=matchflag
else:
print i,'Oh no! no match for galaxy ',i
#print self.sdss_objid[i]
else:
try:
imatch=bd.zdict[str(self.sdss_objid[i])]
#print i,'found a match using dictionary'
i_match[i]=imatch
i_match_flag[i]=1
#print i_match[i],i_match_flag[i]
#if i_match_flag[i] == 0:
# print 'heyyyyyyyyyyyyyyy!!!!!!'
except KeyError:
print 'no match with dictionary'
# write out results as a fits table that is line-matched to cluster NSA table
#ncols=shape(bd.zdat)[1]
#nrows=shape(bd.zdat)[0]
#newarray=zeros((nrows,ncols))
print self.prefix,': ',len(i_match)-sum(i_match_flag),'/',len(i_match),' galaxies not matched'
#ztab=bd.zdat.rows(i_match)
#ztabsubbd.where(imatch)
otab=atpy.Table()
otab.add_column('matchflag',i_match_flag,dtype='bool')
otab.add_column('matchindex',i_match)
zdatindex=i_match[i_match_flag]
#print zdatindex
for i in range(len(bd.zdat.names)):
dtype=bd.zdat.columns[i]
col=zeros(len(self.ra),dtype)
#print zdatindex
for j in range(len(i_match)):
if i_match_flag[j]:
col[j]=bd.zdat[bd.zdat.names[i]][i_match[j]]
#col[i_match_flag]=bd.zdat[bd.zdat.names[i]][zdatindex]
# get rid of column names that start with _ or __ b/c they cause trouble down the road
if bd.zdat.names[i].startswith('__'):
colname=bd.zdat.names[i][2:]
elif bd.zdat.names[i].startswith('_'):
colname=bd.zdat.names[i][1:]
else:
colname=bd.zdat.names[i]
print colname
otab.add_column(colname,col,unit=bd.zdat.units[bd.zdat.names[i]])
outfile=homedir+'research/LocalClusters/NSAmastertables/SimardGIM2D/'+self.prefix+'_GIM2D.fits'
if os.path.exists(outfile):
os.remove(outfile)
otab.write(outfile)
class simard:
def __init__(self):
# infile=homedir+'research/NSA/nsa_v0_1_2.fits'
#infile=homedir+'research/SimardSDSS2011/vizier_votable-9.vot'
#self.zdat=atpy.Table(infile,type='vo')
infile=homedir+'research/SimardSDSS2011/table1and3.fits'
self.zdat=atpy.Table(infile,type='fits')
self.zRA=self.zdat._RA
self.zDEC=self.zdat._DE
self.zdict=dict((a,b) for a,b in zip(self.zdat.objID_1,arange(len(self.zRA))))
bd=simard()
# match radius = 3"/3600 -> deg
delta=2./3600.
#mkw11=cluster('MKW11')
#mkw11.match2zoo(delta)
myclusternames=['MKW11', 'MKW8', 'AWM4', 'A2063', 'A2052', 'NGC6107', 'Coma', 'A1367', 'Hercules']
#myclusternames=['MKW11']
for cname in myclusternames:
cl=cluster(cname)
print '\n',cl.prefix, '\n'
cl.match2zoo(delta)
|
rfinn/LCS
|
paper1code/LCSmatchSimardBD2NSA.py
|
Python
|
gpl-3.0
| 15,568
|
[
"Galaxy"
] |
1d8163482ea9619f73a5d7731bcf2dba52911e318b716483f0b9c5d65a6cffbc
|
from __future__ import print_function, division
import math
import numpy as np
def map_lattice_to_coordinate_space(grid, x, y=None):
"""Maps the lattice coordinate to the coordinate space depending on the
coordinate system.
Parameters
----------
* `grid`: Lattice object
Defines the topology.
* `x`: int.
Grid point.
* `y`: int.
Grid point, 2D case.
"""
if y is None:
_y = 0
else:
_y = y
if grid.coordinate_system == "cartesian":
idy = grid.start_y*grid.delta_y + 0.5*grid.delta_y + _y*grid.delta_y
x_c = grid.global_no_halo_dim_x * grid.delta_x * 0.5
y_c = grid.global_no_halo_dim_y * grid.delta_y * 0.5
idx = grid.start_x*grid.delta_x + 0.5*grid.delta_x + x*grid.delta_x
if idx - x_c < -grid.length_x*0.5:
idx += grid.length_x
if idx - x_c > grid.length_x*0.5:
idx -= grid.length_x
if idy - y_c < -grid.length_y*0.5:
idy += grid.length_y
if idy - y_c > grid.length_y*0.5:
idy -= grid.length_y
if y is None:
return idx - x_c
else:
return idx - x_c, idy - y_c
elif grid.coordinate_system == "cylindrical":
idy = grid.delta_y * (grid.start_y + 0.5 + _y)
idx = grid.delta_x * (grid.start_x - 0.5 + x)
y_c = grid.global_no_halo_dim_y * grid.delta_y * 0.5
if idy - y_c < -grid.length_y*0.5:
idy += grid.length_y
if idy - y_c > grid.length_y*0.5:
idy -= grid.length_y
if y is None:
return idx
else:
return idx, idy - y_c
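# Hedged summary of the cartesian branch above: grid index x is mapped to
# (start_x + 0.5 + x) * delta_x minus half the no-halo box length (and
# likewise for y), then wrapped back into [-length_x/2, length_x/2] for
# periodic boxes, so the returned coordinates are centred on the middle of
# the box. This only restates the expressions above.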
def imprint(state, function):
"""Multiply the wave function of the state by the function provided.
Parameters
----------
* `function` : python function
Function to be printed on the state.
Notes
-----
Useful, for instance, to imprint solitons and vortices on a condensate.
Generally, it performs a transformation of the state whose wave function
becomes
.. math:: \psi(x,y)' = f(x,y) \psi(x,y)
    where :math:`f(x,y)` is the input function and :math:`\psi(x,y)` is the initial
wave function.
Example
-------
>>> import trottersuzuki as ts # import the module
>>> grid = ts.Lattice2D() # Define the simulation's geometry
>>> def vortex(x,y): # Vortex function
>>> z = x + 1j*y
>>> angle = np.angle(z)
>>> return np.exp(1j * angle)
>>> state = ts.GaussianState(grid, 1.) # Create the system's state
>>> state.imprint(vortex) # Imprint a vortex on the state
"""
try:
function(0)
def _function(x, y):
return function(x)
except TypeError:
_function = function
matrix = np.zeros((state.grid.dim_y, state.grid.dim_x),
dtype=np.complex128)
for y in range(state.grid.dim_y):
for x in range(state.grid.dim_x):
matrix[y, x] = _function(*map_lattice_to_coordinate_space(state.grid, x, y))
state.imprint_matrix(matrix.real, matrix.imag)
def get_vortex_position(grid, state, approx_cloud_radius=0.):
"""
Get the position of a single vortex in the quantum state (only for
Cartesian coordinates).
Parameters
----------
* `grid` : Lattice object
Define the geometry of the simulation.
* `state` : State object
System's state.
* `approx_cloud_radius` : float, optional
Radius of the circle, centered at the Lattice's origin, where the
vortex core is expected to be.
Returns
-------
* `coords` numpy array
Coordinates of the vortex core's position (coords[0]: x coordinate;
coords[1]: y coordinate).
Notes
-----
Only one vortex must be present in the state.
Example
-------
>>> import trottersuzuki as ts # import the module
>>> import numpy as np
>>> grid = ts.Lattice2D() # Define the simulation's geometry
>>> state = ts.GaussianState(grid, 1.) # Create a Gaussian state
    >>> def vortex(x, y):                     # Define the vortex to be imprinted
>>> z = x + 1j*y
>>> angle = np.angle(z)
>>> return np.exp(1j * angle)
>>> state.imprint(vortex) # Imprint the vortex on the state
>>> ts.get_vortex_position(grid, state)
array([ 8.88178420e-16, 8.88178420e-16])
"""
if approx_cloud_radius == 0.:
approx_cloud_radius = np.sqrt(2) * grid.length_x
delta_y = grid.length_y / float(grid.dim_y)
delta_x = grid.length_x / float(grid.dim_x)
matrix = state.get_phase()
# calculate norm gradient matrix
norm_grad = np.zeros((grid.dim_x, grid.dim_y))
for idy in range(1, grid.dim_y-1):
for idx in range(1, grid.dim_x-1):
if (idx-grid.dim_x*0.5)**2 + (idy-grid.dim_y*0.5)**2 < \
approx_cloud_radius**2/delta_x**2:
up = matrix[idy+1, idx]
dw = matrix[idy-1, idx]
rg = matrix[idy, idx+1]
lf = matrix[idy, idx-1]
if abs(up-dw) > np.pi:
up -= np.sign(up) * 2. * np.pi
if abs(rg-lf) > np.pi:
rg -= np.sign(rg) * 2. * np.pi
grad_x = (rg-lf)/(2.*delta_x)
grad_y = (up-dw)/(2.*delta_y)
norm_grad[idy, idx] = np.sqrt(grad_x**2 + grad_y**2)
max_norm = np.nanmax(norm_grad) # Find max value in norm_grad
coord = np.transpose(np.where(norm_grad == max_norm))[0]
# Check that the phase has a single discontinuity around the candidate
# vortex position
def position(index, side):
index = int(index)
side = int(side)
# position is periodic of period 4*radius
idx = int(math.fmod(index, 4*side))
quad = idx // side
rest = int(math.fmod(idx, side))
if quad == 0:
x = - (side // 2 + 1)
y = rest - side // 2
if quad == 1:
y = (side // 2 + 1)
x = rest - side // 2
if quad == 2:
x = (side // 2 + 1)
y = - (rest - side // 2)
if quad == 3:
y = - (side // 2 + 1)
x = - (rest - side // 2)
return np.array([y, x])
side = 8 # must be an even number
vortex = 0
    # Count the number of discontinuities in the phase pattern around the
# candidate vortex position
for i in range(side*4):
pos1 = coord + position(i, side)
pos2 = coord + position(i+1, side)
if pos1[0] < 0 or pos1[0] >= grid.dim_x or pos1[1] < 0 or \
pos1[1] >= grid.dim_x:
if pos2[0] < 0 or pos2[0] >= grid.dim_x or pos2[1] < 0 or \
pos2[1] >= grid.dim_x:
break
phase1 = matrix[pos1[0], pos1[1]]
phase2 = matrix[pos2[0], pos2[1]]
if np.abs(phase1 - phase2) >= np.pi * 1.7:
vortex += 1
# Around the vortex there must be a single discontinuity in the phase
if vortex != 1:
return np.array([np.nan, np.nan])
coord_x = []
coord_y = []
for idy in range(1, grid.dim_y-1):
for idx in range(1, grid.dim_x-1):
if norm_grad[idy, idx] >= max_norm*0.9:
coord_x.append((idx + 0.5) * delta_x - 0.5 * grid.length_x)
coord_y.append((idy + 0.5) * delta_y - 0.5 * grid.length_y)
coords = np.zeros(2)
for i in range(len(coord_x)):
coords[1] += coord_y[i] / float(len(coord_x))
coords[0] += coord_x[i] / float(len(coord_x))
return coords
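if __name__ == "__main__":
    # Hedged demo assembled from the docstring examples above; it assumes the
    # compiled trottersuzuki extension is importable and that Lattice2D and
    # GaussianState accept the default constructors shown in those examples.
    import trottersuzuki as ts

    demo_grid = ts.Lattice2D()
    demo_state = ts.GaussianState(demo_grid, 1.)

    def demo_vortex(x, y):
        # Phase winding of 2*pi around the lattice origin.
        return np.exp(1j * np.angle(x + 1j * y))

    imprint(demo_state, demo_vortex)
    # Should report a core close to the origin for a single centred vortex.
    print(get_vortex_position(demo_grid, demo_state))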
|
trotter-suzuki-mpi/trotter-suzuki-mpi
|
src/Python/trottersuzuki/tools.py
|
Python
|
gpl-3.0
| 7,676
|
[
"Gaussian"
] |
458648a896b4b4587c8e66493dc79c187abe395ccf888a7860a2df2de84059c2
|
"""
Serializes a Cython code tree to Cython code. This is primarily useful for
debugging and testing purposes.
The output is in a strict format, no whitespace or comments from the input
is preserved (and it could not be as it is not present in the code tree).
"""
from Cython.Compiler.Visitor import TreeVisitor
from Cython.Compiler.ExprNodes import *
class LinesResult(object):
def __init__(self):
self.lines = []
self.s = u""
def put(self, s):
self.s += s
def newline(self):
self.lines.append(self.s)
self.s = u""
def putline(self, s):
self.put(s)
self.newline()
class DeclarationWriter(TreeVisitor):
indent_string = u" "
def __init__(self, result = None):
super(DeclarationWriter, self).__init__()
if result is None:
result = LinesResult()
self.result = result
self.numindents = 0
self.tempnames = {}
self.tempblockindex = 0
def write(self, tree):
self.visit(tree)
return self.result
def indent(self):
self.numindents += 1
def dedent(self):
self.numindents -= 1
def startline(self, s = u""):
self.result.put(self.indent_string * self.numindents + s)
def put(self, s):
self.result.put(s)
def putline(self, s):
self.result.putline(self.indent_string * self.numindents + s)
def endline(self, s = u""):
self.result.putline(s)
def line(self, s):
self.startline(s)
self.endline()
def comma_separated_list(self, items, output_rhs=False):
if len(items) > 0:
for item in items[:-1]:
self.visit(item)
if output_rhs and item.default is not None:
self.put(u" = ")
self.visit(item.default)
self.put(u", ")
self.visit(items[-1])
def visit_Node(self, node):
raise AssertionError("Node not handled by serializer: %r" % node)
def visit_ModuleNode(self, node):
self.visitchildren(node)
def visit_StatListNode(self, node):
self.visitchildren(node)
def visit_CDefExternNode(self, node):
if node.include_file is None:
file = u'*'
else:
file = u'"%s"' % node.include_file
self.putline(u"cdef extern from %s:" % file)
self.indent()
self.visit(node.body)
self.dedent()
def visit_CPtrDeclaratorNode(self, node):
self.put('*')
self.visit(node.base)
def visit_CReferenceDeclaratorNode(self, node):
self.put('&')
self.visit(node.base)
def visit_CArrayDeclaratorNode(self, node):
self.visit(node.base)
self.put(u'[')
if node.dimension is not None:
self.visit(node.dimension)
self.put(u']')
def visit_CFuncDeclaratorNode(self, node):
# TODO: except, gil, etc.
self.visit(node.base)
self.put(u'(')
self.comma_separated_list(node.args)
self.endline(u')')
def visit_CNameDeclaratorNode(self, node):
self.put(node.name)
def visit_CSimpleBaseTypeNode(self, node):
# See Parsing.p_sign_and_longness
if node.is_basic_c_type:
self.put(("unsigned ", "", "signed ")[node.signed])
if node.longness < 0:
self.put("short " * -node.longness)
elif node.longness > 0:
self.put("long " * node.longness)
self.put(node.name)
def visit_CComplexBaseTypeNode(self, node):
self.put(u'(')
self.visit(node.base_type)
self.visit(node.declarator)
self.put(u')')
def visit_CNestedBaseTypeNode(self, node):
self.visit(node.base_type)
self.put(u'.')
self.put(node.name)
def visit_TemplatedTypeNode(self, node):
self.visit(node.base_type_node)
self.put(u'[')
self.comma_separated_list(node.positional_args + node.keyword_args.key_value_pairs)
self.put(u']')
def visit_CVarDefNode(self, node):
self.startline(u"cdef ")
self.visit(node.base_type)
self.put(u" ")
self.comma_separated_list(node.declarators, output_rhs=True)
self.endline()
def visit_container_node(self, node, decl, extras, attributes):
# TODO: visibility
self.startline(decl)
if node.name:
self.put(u' ')
self.put(node.name)
if node.cname is not None:
self.put(u' "%s"' % node.cname)
if extras:
self.put(extras)
self.endline(':')
self.indent()
if not attributes:
self.putline('pass')
else:
for attribute in attributes:
self.visit(attribute)
self.dedent()
def visit_CStructOrUnionDefNode(self, node):
if node.typedef_flag:
decl = u'ctypedef '
else:
decl = u'cdef '
if node.visibility == 'public':
decl += u'public '
if node.packed:
decl += u'packed '
decl += node.kind
self.visit_container_node(node, decl, None, node.attributes)
def visit_CppClassNode(self, node):
extras = ""
if node.templates:
extras = u"[%s]" % ", ".join(node.templates)
if node.base_classes:
extras += "(%s)" % ", ".join(node.base_classes)
self.visit_container_node(node, u"cdef cppclass", extras, node.attributes)
def visit_CEnumDefNode(self, node):
self.visit_container_node(node, u"cdef enum", None, node.items)
def visit_CEnumDefItemNode(self, node):
self.startline(node.name)
if node.cname:
self.put(u' "%s"' % node.cname)
if node.value:
self.put(u" = ")
self.visit(node.value)
self.endline()
def visit_CClassDefNode(self, node):
assert not node.module_name
if node.decorators:
for decorator in node.decorators:
self.visit(decorator)
self.startline(u"cdef class ")
self.put(node.class_name)
if node.base_class_name:
self.put(u"(")
if node.base_class_module:
self.put(node.base_class_module)
self.put(u".")
self.put(node.base_class_name)
self.put(u")")
self.endline(u":")
self.indent()
self.visit(node.body)
self.dedent()
def visit_CTypeDefNode(self, node):
self.startline(u"ctypedef ")
self.visit(node.base_type)
self.put(u" ")
self.visit(node.declarator)
self.endline()
def visit_FuncDefNode(self, node):
self.startline(u"def %s(" % node.name)
self.comma_separated_list(node.args)
self.endline(u"):")
self.indent()
self.visit(node.body)
self.dedent()
def visit_CArgDeclNode(self, node):
if node.base_type.name is not None:
self.visit(node.base_type)
self.put(u" ")
self.visit(node.declarator)
if node.default is not None:
self.put(u" = ")
self.visit(node.default)
def visit_CImportStatNode(self, node):
self.startline(u"cimport ")
self.put(node.module_name)
if node.as_name:
self.put(u" as ")
self.put(node.as_name)
self.endline()
def visit_FromCImportStatNode(self, node):
self.startline(u"from ")
self.put(node.module_name)
self.put(u" cimport ")
first = True
for pos, name, as_name, kind in node.imported_names:
assert kind is None
if first:
first = False
else:
self.put(u", ")
self.put(name)
if as_name:
self.put(u" as ")
self.put(as_name)
self.endline()
def visit_NameNode(self, node):
self.put(node.name)
def visit_IntNode(self, node):
self.put(node.value)
def visit_NoneNode(self, node):
self.put(u"None")
def visit_NotNode(self, node):
self.put(u"(not ")
self.visit(node.operand)
self.put(u")")
def visit_DecoratorNode(self, node):
self.startline("@")
self.visit(node.decorator)
self.endline()
def visit_BinopNode(self, node):
self.visit(node.operand1)
self.put(u" %s " % node.operator)
self.visit(node.operand2)
def visit_AttributeNode(self, node):
self.visit(node.obj)
self.put(u".%s" % node.attribute)
def visit_BoolNode(self, node):
self.put(str(node.value))
# FIXME: represent string nodes correctly
def visit_StringNode(self, node):
value = node.value
if value.encoding is not None:
value = value.encode(value.encoding)
self.put(repr(value))
def visit_PassStatNode(self, node):
self.startline(u"pass")
self.endline()
class CodeWriter(DeclarationWriter):
def visit_SingleAssignmentNode(self, node):
self.startline()
self.visit(node.lhs)
self.put(u" = ")
self.visit(node.rhs)
self.endline()
def visit_CascadedAssignmentNode(self, node):
self.startline()
for lhs in node.lhs_list:
self.visit(lhs)
self.put(u" = ")
self.visit(node.rhs)
self.endline()
def visit_PrintStatNode(self, node):
self.startline(u"print ")
self.comma_separated_list(node.arg_tuple.args)
if not node.append_newline:
self.put(u",")
self.endline()
def visit_ForInStatNode(self, node):
self.startline(u"for ")
self.visit(node.target)
self.put(u" in ")
self.visit(node.iterator.sequence)
self.endline(u":")
self.indent()
self.visit(node.body)
self.dedent()
if node.else_clause is not None:
self.line(u"else:")
self.indent()
self.visit(node.else_clause)
self.dedent()
def visit_IfStatNode(self, node):
        # The IfClauseNode is handled directly without a separate match
        # for clarity.
self.startline(u"if ")
self.visit(node.if_clauses[0].condition)
self.endline(":")
self.indent()
self.visit(node.if_clauses[0].body)
self.dedent()
for clause in node.if_clauses[1:]:
self.startline("elif ")
self.visit(clause.condition)
self.endline(":")
self.indent()
self.visit(clause.body)
self.dedent()
if node.else_clause is not None:
self.line("else:")
self.indent()
self.visit(node.else_clause)
self.dedent()
def visit_SequenceNode(self, node):
self.comma_separated_list(node.args) # Might need to discover whether we need () around tuples...hmm...
def visit_SimpleCallNode(self, node):
self.visit(node.function)
self.put(u"(")
self.comma_separated_list(node.args)
self.put(")")
def visit_GeneralCallNode(self, node):
self.visit(node.function)
self.put(u"(")
posarg = node.positional_args
if isinstance(posarg, AsTupleNode):
self.visit(posarg.arg)
else:
self.comma_separated_list(posarg)
if node.keyword_args is not None or node.starstar_arg is not None:
raise Exception("Not implemented yet")
self.put(u")")
def visit_ExprStatNode(self, node):
self.startline()
self.visit(node.expr)
self.endline()
def visit_InPlaceAssignmentNode(self, node):
self.startline()
self.visit(node.lhs)
self.put(u" %s= " % node.operator)
self.visit(node.rhs)
self.endline()
def visit_WithStatNode(self, node):
self.startline()
self.put(u"with ")
self.visit(node.manager)
if node.target is not None:
self.put(u" as ")
self.visit(node.target)
self.endline(u":")
self.indent()
self.visit(node.body)
self.dedent()
def visit_TryFinallyStatNode(self, node):
self.line(u"try:")
self.indent()
self.visit(node.body)
self.dedent()
self.line(u"finally:")
self.indent()
self.visit(node.finally_clause)
self.dedent()
def visit_TryExceptStatNode(self, node):
self.line(u"try:")
self.indent()
self.visit(node.body)
self.dedent()
for x in node.except_clauses:
self.visit(x)
if node.else_clause is not None:
self.visit(node.else_clause)
def visit_ExceptClauseNode(self, node):
self.startline(u"except")
if node.pattern is not None:
self.put(u" ")
self.visit(node.pattern)
if node.target is not None:
self.put(u", ")
self.visit(node.target)
self.endline(":")
self.indent()
self.visit(node.body)
self.dedent()
def visit_ReturnStatNode(self, node):
self.startline("return ")
self.visit(node.value)
self.endline()
def visit_ReraiseStatNode(self, node):
self.line("raise")
def visit_ImportNode(self, node):
self.put(u"(import %s)" % node.module_name.value)
def visit_TempsBlockNode(self, node):
"""
Temporaries are output like $1_1', where the first number is
an index of the TempsBlockNode and the second number is an index
of the temporary which that block allocates.
"""
idx = 0
for handle in node.temps:
self.tempnames[handle] = "$%d_%d" % (self.tempblockindex, idx)
idx += 1
self.tempblockindex += 1
self.visit(node.body)
def visit_TempRefNode(self, node):
self.put(self.tempnames[node.handle])
class PxdWriter(DeclarationWriter):
def __call__(self, node):
print u'\n'.join(self.write(node).lines)
return node
def visit_CFuncDefNode(self, node):
if 'inline' in node.modifiers:
return
if node.overridable:
self.startline(u'cpdef ')
else:
self.startline(u'cdef ')
if node.visibility != 'private':
self.put(node.visibility)
self.put(u' ')
if node.api:
self.put(u'api ')
self.visit(node.declarator)
def visit_StatNode(self, node):
pass
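# Hedged usage sketch: CodeWriter is a tree visitor, so a typical (assumed)
# round trip parses Cython source into a tree and serializes it back. The
# parse_from_strings helper is the one used by Cython's own test suite and is
# an assumption here, not something defined in this module.
#
#   from Cython.Compiler.TreeFragment import parse_from_strings
#   tree = parse_from_strings("serialize_test", u"x = 1\nif x:\n    pass\n")
#   print u'\n'.join(CodeWriter().write(tree).lines)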
|
JulienMcJay/eclock
|
windows/Python27/Lib/site-packages/Cython/CodeWriter.py
|
Python
|
gpl-2.0
| 14,900
|
[
"VisIt"
] |
a925b97e5d898a00155e034aec71e67f2ae9407e829bedaa62e93c2fd151c712
|
# Copyright (C) 2018
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************************************
espressopp.interaction.TabulatedSubEnsAngular
*********************************************
.. function:: espressopp.interaction.TabulatedSubEnsAngular(dim, itype, filenames)
:param dim: Number of potentials to be used for this interaction
:param itype: The interpolation type: 1 - linear, 2 - akima spline, 3 - cubic spline
:param filenames: The tabulated potential filenames.
:type itype: int
:type filename: str
.. function:: espressopp.interaction.FixedTripleListTabulatedSubEnsAngular(system, ftl, potential)
:param system: The Espresso++ system object.
:param ftl: The FixedTripleList.
:param potential: The potential.
:type system: espressopp.System
:type ftl: espressopp.FixedTripleList
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.FixedTripleListTabulatedSubEnsAngular.setPotential(potential)
:param potential: The potential object.
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.FixedTripleListTypesTabulatedSubEnsAngular(system, ftl)
:param system: The Espresso++ system object.
:type system: espressopp.System
:param ftl: The FixedTriple list.
:type ftl: espressopp.FixedTripleList
.. function:: espressopp.interaction.FixedTripleListTypesTabulatedSubEnsAngular.setPotential(type1, type2, type3, potential)
Defines angular potential for interaction between particles of types type1-type2-type3.
:param type1: Type of particle 1.
:type type1: int
:param type2: Type of particle 2.
:type type2: int
:param type3: Type of particle 3.
:type type3: int
:param potential: The potential to set up.
:type potential: espressopp.interaction.AngularPotential
"""
from espressopp import pmi
from espressopp.esutil import *
from espressopp.interaction.AngularPotential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_TabulatedSubEnsAngular, \
interaction_FixedTripleListTabulatedSubEnsAngular, \
interaction_FixedTripleListTypesTabulatedSubEnsAngular
class TabulatedSubEnsAngularLocal(AngularPotentialLocal, interaction_TabulatedSubEnsAngular):
def __init__(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_TabulatedSubEnsAngular)
class FixedTripleListTabulatedSubEnsAngularLocal(InteractionLocal, interaction_FixedTripleListTabulatedSubEnsAngular):
def __init__(self, system, ftl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedTripleListTabulatedSubEnsAngular, system, ftl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
class FixedTripleListTypesTabulatedSubEnsAngularLocal(InteractionLocal, interaction_FixedTripleListTypesTabulatedSubEnsAngular):
def __init__(self, system, ftl):
if pmi.workerIsActive():
cxxinit(self, interaction_FixedTripleListTypesTabulatedSubEnsAngular, system, ftl)
def setPotential(self, type1, type2, type3, potential):
if pmi.workerIsActive():
self.cxxclass.setPotential(self, type1, type2, type3, potential)
def getPotential(self, type1, type2, type3):
if pmi.workerIsActive():
return self.cxxclass.getPotential(self, type1, type2, type3)
def setFixedTripleList(self, ftl):
if pmi.workerIsActive():
self.cxxclass.setFixedTripleList(self, ftl)
def getFixedTripleList(self):
if pmi.workerIsActive():
return self.cxxclass.getFixedTripleList(self)
if pmi.isController:
class TabulatedSubEnsAngular(AngularPotential):
'The TabulatedSubEnsAngular potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.TabulatedSubEnsAngularLocal',
pmicall = ['weight_get', 'weight_set',
'alpha_get', 'alpha_set', 'targetProb_get', 'targetProb_set',
'colVarSd_get', 'colVarSd_set',
'dimension_get', 'filenames_get', 'filename_get',
'filename_set', 'addInteraction', 'colVarRefs_get',
'colVarRef_get']
)
class FixedTripleListTabulatedSubEnsAngular(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedTripleListTabulatedSubEnsAngularLocal',
pmicall = ['setPotential', 'getFixedTripleList']
)
class FixedTripleListTypesTabulatedSubEnsAngular(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedTripleListTypesTabulatedSubEnsAngularLocal',
pmicall = ['setPotential','getPotential', 'setFixedTripleList', 'getFixedTripleList']
)
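# Hedged usage sketch based on the docstring at the top of this module; the
# system/ftl setup and the system.addInteraction call are assumed to come
# from a normal ESPResSo++ script and are not defined here.
#
#   potential = espressopp.interaction.TabulatedSubEnsAngular()
#   # ... tables are registered via the addInteraction/filename_set calls
#   # exposed above (exact argument lists not shown here).
#   interaction = espressopp.interaction.FixedTripleListTabulatedSubEnsAngular(
#       system, ftl, potential)
#   system.addInteraction(interaction)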
|
MrTheodor/espressopp
|
src/interaction/TabulatedSubEnsAngular.py
|
Python
|
gpl-3.0
| 5,918
|
[
"ESPResSo"
] |
74624a26833a549bc9efd6422e1bece05cbe2838b5579a9bb06fea0f23081fb9
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2010-2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
__tests__ = 'stoqlib/domain/loan.py'
import decimal
from kiwi.currency import currency
from stoqlib.exceptions import DatabaseInconsistency
from stoqlib.database.runtime import get_current_branch
from stoqlib.domain.loan import Loan, LoanItem, _
from stoqlib.domain.product import StockTransactionHistory
from stoqlib.domain.taxes import (ProductTaxTemplate, ProductIcmsTemplate,
ProductIpiTemplate)
from stoqlib.domain.test.domaintest import DomainTest
class TestLoan(DomainTest):
def test_get_status_name(self):
loan = self.create_loan()
for status in loan.statuses:
self.assertEquals(loan.get_status_name(status),
loan.statuses[status])
with self.assertRaises(DatabaseInconsistency) as error:
loan.get_status_name(9)
expected = _("Invalid status %d") % 9
self.assertEquals(str(error.exception), expected)
def test_add_item(self):
loan = self.create_loan()
loan_item = self.create_loan_item()
with self.assertRaises(AssertionError):
loan.add_item(loan_item)
loan_item.loan = None
loan.add_item(loan_item)
self.assertEquals(loan_item.loan, loan)
def test_add_sellable(self):
loan = self.create_loan()
sellable = self.create_sellable()
self.create_storable(product=sellable.product,
branch=loan.branch, stock=2)
loan.add_sellable(sellable, quantity=1, price=10)
items = list(loan.get_items())
self.assertEquals(len(items), 1)
self.failIf(items[0].sellable is not sellable)
def test_get_total_amount(self):
loan_item = self.create_loan_item()
self.assertEquals(loan_item.loan.get_total_amount(), currency(10))
def test_get_client_name(self):
loan = self.create_loan()
self.assertIs(loan.get_client_name(), u'')
client = self.create_client(name=u'Client XX')
loan.client = client
self.assertEquals(loan.get_client_name(), u'Client XX')
def test_get_branch_name(self):
loan = self.create_loan()
self.assertEquals(loan.get_branch_name(), u'Moda Stoq')
branch = self.create_branch(name=u'New Branch')
loan.branch = branch
self.assertEquals(loan.get_branch_name(), u'New Branch shop')
loan.branch = None
self.assertEquals(loan.get_branch_name(), u'')
def test_get_responsible_name(self):
loan = self.create_loan()
self.assertEquals(loan.get_responsible_name(), u'Administrator')
loan.responsible.person.name = u''
self.assertEquals(loan.get_responsible_name(), u'')
def test_sync_stock(self):
loan = self.create_loan()
for i in range(5):
item = self.create_loan_item(loan=loan)
if i % 2 == 0:
item.sellable.product.manage_stock = False
elif i % 3 == 0:
item._original_quantity = 10 * i
item.quantity = i * 5
item.return_quantity = 0
item._original_return_quantity = 0
else:
item._original_quantity = 3
item.quantity = 5
item.return_quantity = 5
item._original_return_quantity = 6
results = self.store.find(LoanItem, loan=loan)
stock_item = results[1].storable.get_stock_item(branch=loan.branch,
batch=None)
before_quantity = stock_item.quantity
loan.sync_stock()
after_quantity = stock_item.quantity
compare = [[before_quantity, after_quantity]]
stock_item = results[3].storable.get_stock_item(branch=loan.branch,
batch=None)
before_quantity = stock_item.quantity
loan.sync_stock()
after_quantity = stock_item.quantity
compare.append([before_quantity, after_quantity])
expected = [[decimal.Decimal(10), decimal.Decimal(7)],
[decimal.Decimal(25), decimal.Decimal(25)]]
self.assertEquals(compare, expected)
def test_can_close(self):
loan_item = self.create_loan_item()
loan = loan_item.loan
self.assertEquals(loan.status, Loan.STATUS_OPEN)
self.failIf(loan.can_close())
loan_item.return_quantity = loan_item.quantity
self.failUnless(loan.can_close())
loan.status = Loan.STATUS_CLOSED
result = loan.can_close()
self.assertFalse(result)
def test_close(self):
loan_item = self.create_loan_item()
loan = loan_item.loan
self.assertEquals(loan.status, Loan.STATUS_OPEN)
self.failIf(loan.can_close())
loan_item.return_quantity = loan_item.quantity
self.failUnless(loan.can_close())
loan.close()
self.assertEquals(loan.status, Loan.STATUS_CLOSED)
def test_remove_item(self):
loan_item = self.create_loan_item()
loan = loan_item.loan
total_items = self.store.find(LoanItem, loan=loan).count()
self.assertEquals(total_items, 1)
loan.remove_item(loan_item)
total_items = self.store.find(LoanItem, loan=loan).count()
self.assertEquals(total_items, 0)
with self.sysparam(SYNCHRONIZED_MODE=True):
loan_item = self.create_loan_item()
loan = loan_item.loan
before_remove = self.store.find(LoanItem).count()
loan.remove_item(loan_item)
after_remove = self.store.find(LoanItem).count()
            # The item should still be in the database
self.assertEqual(before_remove, after_remove)
# But not related to the loan
self.assertEquals(self.store.find(LoanItem, loan=loan).count(), 0)
def test_get_available_discount_for_items(self):
loan_item = self.create_loan_item()
loan_item.loan.client = self.create_client()
user = self.create_user()
user.profile.max_discount = decimal.Decimal('5')
discount = loan_item.loan.get_available_discount_for_items(user)
self.assertEqual(discount, decimal.Decimal('0.50'))
# Test exclude item
loan = self.create_loan()
loan_item2 = self.create_loan_item()
loan_item2.loan = None
loan.add_item(loan_item2)
discount = loan.get_available_discount_for_items(user, loan_item2)
self.assertEqual(discount, decimal.Decimal('0'))
# Test surcharge
loan_item2.set_discount(decimal.Decimal('-5'))
self.assertEqual(loan_item2.price, currency('10.50'))
discount = loan_item2.loan.get_available_discount_for_items(user)
self.assertEqual(discount, decimal.Decimal('0'))
def test_set_items_discount(self):
loan = self.create_loan()
loan_item1 = self.create_loan_item()
loan_item2 = self.create_loan_item()
loan_item1.loan = None
loan_item2.loan = None
loan.add_item(loan_item1)
loan.add_item(loan_item2)
self.assertEqual(loan.get_total_amount(), 20)
# 5% of discount
loan.set_items_discount(5)
self.assertEqual(loan.get_total_amount(), 19)
# NF-e operations
def test_comments(self):
loan = self.create_loan()
loan.notes = u'Loan notes 1\n Loan notes 2'
expected_notes = u'Loan notes 1\n Loan notes 2'
comments = '\n'.join(c.comment for c in loan.comments)
self.assertEquals(expected_notes, comments)
def test_discount_value(self):
loan = self.create_loan()
loan_item1 = self.create_loan_item(loan=loan)
self.assertEqual(loan.invoice_total, currency(10))
# Loan item price < base_price
loan_item1.price = 9
self.assertEquals(loan.discount_value, currency(1))
self.assertEqual(loan.invoice_total, currency(9))
# Loan item price > base_price
loan_item2 = self.create_loan_item()
loan_item2.price = 20
loan_item2.loan = loan
self.assertEqual(loan.invoice_total, currency(29))
self.assertEquals(loan.discount_value, currency(1))
self.assertEquals(loan.invoice_subtotal, currency(20))
def test_get_items(self):
loan = self.create_loan()
loan_item = self.create_loan_item(loan=loan)
items = loan.get_items()
self.assertEquals(items[0], loan_item)
def test_recipient(self):
client = self.create_client()
loan = self.create_loan(client=client)
self.assertEquals(loan.recipient, client.person)
def test_invoice_number(self):
# FIXME: Check using the invoice number saved in new database table.
loan = self.create_loan()
self.assertEquals(loan.invoice_number, 1)
def test_operation_nature(self):
# FIXME: Check using the operation_nature that will be saved in new field.
loan = self.create_loan()
self.assertEquals(loan.operation_nature, u'Loan')
class TestLoanItem(DomainTest):
def test_storm_loaded(self):
item = self.create_loan_item()
item.return_quantity = 2
self.assertEquals(item._original_quantity, 0)
self.assertEquals(item._original_return_quantity, 0)
item.__storm_loaded__()
self.assertEquals(item._original_quantity, 1)
self.assertEquals(item._original_return_quantity, 2)
def test_sync_stock(self):
loan = self.create_loan()
product = self.create_product()
branch = get_current_branch(self.store)
storable = self.create_storable(product, branch, 4)
loan.branch = branch
initial = storable.get_balance_for_branch(branch)
sellable = product.sellable
# creates a loan with 4 items of the same product
quantity = 4
loan_item = loan.add_sellable(sellable, quantity=quantity, price=10)
loan_item.sync_stock()
self.assertEquals(loan_item.quantity, quantity)
self.assertEquals(loan_item.return_quantity, 0)
self.assertEquals(loan_item.sale_quantity, 0)
# The quantity loaned items should be removed from stock
self.assertEquals(
storable.get_balance_for_branch(branch),
initial - quantity)
# Sell one of the loaned items and return one item (leaving 2 in the
# loan)
loan_item.return_quantity = 1
loan_item.sale_quantity = 1
loan_item.sync_stock()
self.assertEquals(loan_item.quantity, quantity)
self.assertEquals(loan_item.return_quantity, 1)
self.assertEquals(loan_item.sale_quantity, 1)
# The return_quantity should be returned to the stock
self.assertEquals(
storable.get_balance_for_branch(branch),
initial - quantity + loan_item.return_quantity)
# Return the 2 remaining products in this loan.
loan_item.return_quantity += 2
loan_item.sync_stock()
self.assertEquals(loan_item.quantity, quantity)
self.assertEquals(loan_item.return_quantity, 3)
self.assertEquals(loan_item.sale_quantity, 1)
# The return_quantity should be returned to the stock
self.assertEquals(
storable.get_balance_for_branch(branch),
initial - quantity + loan_item.return_quantity)
def test_sync_stock_with_storable(self):
loan = self.create_loan(branch=self.create_branch())
product = self.create_product()
storable = self.create_storable(product, loan.branch, is_batch=True)
batch = self.create_storable_batch(storable=storable)
storable.increase_stock(10, loan.branch,
StockTransactionHistory.TYPE_INITIAL,
None, batch=batch)
loan_item = loan.add_sellable(product.sellable, quantity=4, price=10,
batch=batch)
self.assertEqual(batch.get_balance_for_branch(loan.branch), 10)
loan_item.sync_stock()
self.assertEqual(batch.get_balance_for_branch(loan.branch), 6)
self.assertEquals(loan_item.quantity, 4)
self.assertEquals(loan_item.return_quantity, 0)
self.assertEquals(loan_item.sale_quantity, 0)
# The sale quantity should still be decreased
loan_item.sale_quantity = 2
loan_item.sync_stock()
self.assertEqual(batch.get_balance_for_branch(loan.branch), 6)
# The return quantity should go back to the stock
loan_item.return_quantity = 2
loan_item.sync_stock()
self.assertEqual(batch.get_balance_for_branch(loan.branch), 8)
def test_remaining_quantity(self):
loan = self.create_loan()
product = self.create_product()
branch = get_current_branch(self.store)
self.create_storable(product, branch, 4)
loan.branch = branch
# creates a loan with 4 items of the same product
loan_item = loan.add_sellable(product.sellable, quantity=4, price=10)
self.assertEqual(loan_item.get_remaining_quantity(), 4)
loan_item.sale_quantity = 1
self.assertEqual(loan_item.get_remaining_quantity(), 3)
loan_item.return_quantity = 2
self.assertEqual(loan_item.get_remaining_quantity(), 1)
def test_get_quantity_unit_string(self):
loan_item = self.create_loan_item()
loan_item.sellable.unit = self.create_sellable_unit(description=u'Kg')
self.assertEquals(loan_item.get_quantity_unit_string(), u'1.000 Kg')
def test_get_total(self):
item = self.create_loan_item()
self.assertEquals(item.get_total(), currency(10))
def test_set_discount(self):
loan_item = self.create_loan_item()
self.assertEqual(loan_item.get_total(), currency(10))
# It requires a currency value but is 5% of discount
loan_item.set_discount(decimal.Decimal('4.9'))
self.assertEqual(loan_item.get_total(), currency('9.51'))
# NF-e operations
def test_nfe_data(self):
# FIXME: Improve this test after fix the properties, icms_info and ipi_info.
loan = self.create_loan()
product = self.create_product()
icms_tax_template = ProductTaxTemplate(store=self.store,
tax_type=ProductTaxTemplate.TYPE_ICMS)
icms_template = ProductIcmsTemplate(store=self.store,
product_tax_template=icms_tax_template)
ipi_tax_template = ProductTaxTemplate(store=self.store,
tax_type=ProductTaxTemplate.TYPE_IPI)
ipi_template = ProductIpiTemplate(store=self.store,
product_tax_template=ipi_tax_template)
product.icms_template = icms_template
product.ipi_template = ipi_template
loan_item = loan.add_sellable(product.sellable)
self.assertEquals(loan_item.icms_info, None)
self.assertEquals(loan_item.ipi_info, None)
def test_nfe_cfop_code(self):
loan_item = self.create_loan_item()
client = self.create_client()
loan_item.loan.client = client
self.create_address(person=client.person)
# Branch address isn't the same of client
self.assertEquals(loan_item.nfe_cfop_code, u'6917')
# Branch address is the same of client
loan_item.loan.branch.person = client.person
self.assertEquals(loan_item.nfe_cfop_code, u'5917')
|
tiagocardosos/stoq
|
stoqlib/domain/test/test_loan.py
|
Python
|
gpl-2.0
| 16,508
|
[
"VisIt"
] |
d8d7fdb6acb9143d498cdd8c3e1af7b3fdba5ec4ebb140391bd6199301b5450c
|
""" MQProducer
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC import gLogger
from DIRAC.Resources.MessageQueue.Utilities import getDestinationAddress, getMQService
class MQProducer(object):
def __init__(self, mqManager, mqURI, producerId):
self._connectionManager = mqManager
self._mqURI = mqURI
self._destination = getDestinationAddress(self._mqURI)
self._mqService = getMQService(self._mqURI)
self._id = producerId
self.log = gLogger.getSubLogger(self.__class__.__name__)
def put(self, msg):
result = self._connectionManager.getConnector(self._mqService)
if result["OK"]:
connector = result["Value"]
return connector.put(message=msg, parameters={"destination": self._destination})
return result
def close(self):
"""Function closes the connection for this client.
The producer id is removed from the connection storage.
        It is not guaranteed that the connection will be
        removed, because other messengers can still be using it.
        Returns:
            S_OK or S_ERROR: an error is returned if the connection was
            already closed for this producer.
"""
return self._connectionManager.stopConnection(mqURI=self._mqURI, messengerId=self._id)
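# Hedged usage sketch: producers are normally handed out by the MQ
# connection-manager machinery (e.g. a createProducer() helper) rather than
# constructed directly; that factory is an assumption, and only put() and
# close() below are what this module defines.
#
#   result = producer.put("some message")
#   if not result["OK"]:
#       gLogger.error("Failed to send message", result["Message"])
#   producer.close()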
|
ic-hep/DIRAC
|
src/DIRAC/Resources/MessageQueue/MQProducer.py
|
Python
|
gpl-3.0
| 1,400
|
[
"DIRAC"
] |
cf4e7cc445d8fa998d542aa2d0f5e250484c5dc821198554c424e617e315ba50
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Test the main loading API.
"""
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests # isort:skip
import pathlib
from unittest import mock
import netCDF4
import iris
import iris.io
@tests.skip_data
class TestLoad(tests.IrisTest):
def test_normal(self):
paths = (tests.get_data_path(["PP", "aPPglob1", "global.pp"]),)
cubes = iris.load(paths)
self.assertEqual(len(cubes), 1)
def test_path_object(self):
paths = (
pathlib.Path(tests.get_data_path(["PP", "aPPglob1", "global.pp"])),
)
cubes = iris.load(paths)
self.assertEqual(len(cubes), 1)
def test_nonexist(self):
paths = (
tests.get_data_path(["PP", "aPPglob1", "global.pp"]),
tests.get_data_path(["PP", "_guaranteed_non_exist.pp"]),
)
with self.assertRaises(IOError) as error_trap:
_ = iris.load(paths)
self.assertIn(
"One or more of the files specified did not exist",
str(error_trap.exception),
)
def test_nonexist_wild(self):
paths = (
tests.get_data_path(["PP", "aPPglob1", "global.pp"]),
tests.get_data_path(["PP", "_guaranteed_non_exist_*.pp"]),
)
with self.assertRaises(IOError) as error_trap:
_ = iris.load(paths)
self.assertIn(
"One or more of the files specified did not exist",
str(error_trap.exception),
)
def test_bogus(self):
paths = (tests.get_data_path(["PP", "aPPglob1", "global.pp"]),)
cubes = iris.load(paths, "wibble")
self.assertEqual(len(cubes), 0)
def test_real_and_bogus(self):
paths = (tests.get_data_path(["PP", "aPPglob1", "global.pp"]),)
cubes = iris.load(paths, ("air_temperature", "wibble"))
self.assertEqual(len(cubes), 1)
def test_duplicate(self):
paths = (
tests.get_data_path(["PP", "aPPglob1", "global.pp"]),
tests.get_data_path(["PP", "aPPglob1", "gl?bal.pp"]),
)
cubes = iris.load(paths)
self.assertEqual(len(cubes), 2)
@tests.skip_data
class TestLoadCube(tests.IrisTest):
def test_normal(self):
paths = (tests.get_data_path(["PP", "aPPglob1", "global.pp"]),)
_ = iris.load_cube(paths)
def test_path_object(self):
paths = (
pathlib.Path(tests.get_data_path(["PP", "aPPglob1", "global.pp"])),
)
_ = iris.load_cube(paths)
def test_not_enough(self):
paths = (tests.get_data_path(["PP", "aPPglob1", "global.pp"]),)
with self.assertRaises(iris.exceptions.ConstraintMismatchError):
iris.load_cube(paths, "wibble")
def test_too_many(self):
paths = (
tests.get_data_path(["PP", "aPPglob1", "global.pp"]),
tests.get_data_path(["PP", "aPPglob1", "gl?bal.pp"]),
)
with self.assertRaises(iris.exceptions.ConstraintMismatchError):
iris.load_cube(paths)
@tests.skip_data
class TestLoadCubes(tests.IrisTest):
def test_normal(self):
paths = (tests.get_data_path(["PP", "aPPglob1", "global.pp"]),)
cubes = iris.load_cubes(paths)
self.assertEqual(len(cubes), 1)
def test_path_object(self):
paths = (
pathlib.Path(tests.get_data_path(["PP", "aPPglob1", "global.pp"])),
)
cubes = iris.load_cubes(paths)
self.assertEqual(len(cubes), 1)
def test_not_enough(self):
paths = (tests.get_data_path(["PP", "aPPglob1", "global.pp"]),)
with self.assertRaises(iris.exceptions.ConstraintMismatchError):
iris.load_cubes(paths, "wibble")
def test_not_enough_multi(self):
paths = (tests.get_data_path(["PP", "aPPglob1", "global.pp"]),)
with self.assertRaises(iris.exceptions.ConstraintMismatchError):
iris.load_cubes(paths, ("air_temperature", "wibble"))
def test_too_many(self):
paths = (
tests.get_data_path(["PP", "aPPglob1", "global.pp"]),
tests.get_data_path(["PP", "aPPglob1", "gl?bal.pp"]),
)
with self.assertRaises(iris.exceptions.ConstraintMismatchError):
            iris.load_cubes(paths)
@tests.skip_data
class TestLoadRaw(tests.IrisTest):
def test_normal(self):
paths = (tests.get_data_path(["PP", "aPPglob1", "global.pp"]),)
cubes = iris.load_raw(paths)
self.assertEqual(len(cubes), 1)
def test_path_object(self):
paths = (
pathlib.Path(tests.get_data_path(["PP", "aPPglob1", "global.pp"])),
)
cubes = iris.load_raw(paths)
self.assertEqual(len(cubes), 1)
class TestOPeNDAP(tests.IrisTest):
def setUp(self):
self.url = "http://geoport.whoi.edu:80/thredds/dodsC/bathy/gom15"
def test_load_http_called(self):
# Check that calling iris.load_* with an http URI triggers a call to
# ``iris.io.load_http``
class LoadHTTPCalled(Exception):
pass
def new_load_http(passed_urls, *args, **kwargs):
self.assertEqual(len(passed_urls), 1)
self.assertEqual(self.url, passed_urls[0])
raise LoadHTTPCalled()
try:
orig = iris.io.load_http
iris.io.load_http = new_load_http
for fn in [
iris.load,
iris.load_raw,
iris.load_cube,
iris.load_cubes,
]:
with self.assertRaises(LoadHTTPCalled):
fn(self.url)
finally:
iris.io.load_http = orig
def test_netCDF_Dataset_call(self):
# Check that load_http calls netCDF4.Dataset and supplies the expected URL.
# To avoid making a request to an OPeNDAP server in a test, instead
# mock the call to netCDF.Dataset so that it returns a dataset for a
# local file.
filename = tests.get_data_path(
("NetCDF", "global", "xyt", "SMALL_total_column_co2.nc")
)
fake_dataset = netCDF4.Dataset(filename)
with mock.patch(
"netCDF4.Dataset", return_value=fake_dataset
) as dataset_loader:
next(iris.io.load_http([self.url], callback=None))
dataset_loader.assert_called_with(self.url, mode="r")
if __name__ == "__main__":
tests.main()
|
SciTools/iris
|
lib/iris/tests/test_load.py
|
Python
|
lgpl-3.0
| 6,632
|
[
"NetCDF"
] |
aba0b7d227091f4fbf438d430c6fdf74ee4a47b42c9e14ba66a1f5c9ab7aa261
|
#!/usr/bin/env python
# This example demonstrates the use of VTK data arrays as attribute
# data as well as field data. It creates geometry (vtkPolyData) as
# well as attribute data explicitly.
import vtk
# Create a float array which represents the points.
pcoords = vtk.vtkFloatArray()
# Note that by default, an array has 1 component.
# We have to change it to 3 for points
pcoords.SetNumberOfComponents(3)
# We ask pcoords to allocate room for at least 4 tuples
# and set the number of tuples to 4.
pcoords.SetNumberOfTuples(4)
# Assign each tuple. There are 5 specialized versions of SetTuple:
# SetTuple1 SetTuple2 SetTuple3 SetTuple4 SetTuple9
# These take 1, 2, 3, 4 and 9 components respectively.
pcoords.SetTuple3(0, 0.0, 0.0, 0.0)
pcoords.SetTuple3(1, 0.0, 1.0, 0.0)
pcoords.SetTuple3(2, 1.0, 0.0, 0.0)
pcoords.SetTuple3(3, 1.0, 1.0, 0.0)
# Create vtkPoints and assign pcoords as the internal data array.
points = vtk.vtkPoints()
points.SetData(pcoords)
# Create the cells. In this case, a triangle strip with 2 triangles
# (which can be represented by 4 points)
strips = vtk.vtkCellArray()
strips.InsertNextCell(4)
strips.InsertCellPoint(0)
strips.InsertCellPoint(1)
strips.InsertCellPoint(2)
strips.InsertCellPoint(3)
# Create an integer array with 4 tuples. Note that when using
# InsertNextValue (or InsertNextTuple1 which is equivalent in
# this situation), the array will expand automatically
temperature = vtk.vtkIntArray()
temperature.SetName("Temperature")
temperature.InsertNextValue(10)
temperature.InsertNextValue(20)
temperature.InsertNextValue(30)
temperature.InsertNextValue(40)
# Create a double array.
vorticity = vtk.vtkDoubleArray()
vorticity.SetName("Vorticity")
vorticity.InsertNextValue(2.7)
vorticity.InsertNextValue(4.1)
vorticity.InsertNextValue(5.3)
vorticity.InsertNextValue(3.4)
# Create the dataset. In this case, we create a vtkPolyData
polydata = vtk.vtkPolyData()
# Assign points and cells
polydata.SetPoints(points)
polydata.SetStrips(strips)
# Assign scalars
polydata.GetPointData().SetScalars(temperature)
# Add the vorticity array. In this example, this field
# is not used.
polydata.GetPointData().AddArray(vorticity)
# Create the mapper and set the appropriate scalar range
# (default is (0, 1))
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(polydata)
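# (Hedged note: on VTK 6 and later this call would be
# mapper.SetInputData(polydata); SetInput is the pre-VTK-6 pipeline API
# that this example targets.)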
mapper.SetScalarRange(0, 40)
# Create an actor.
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create the rendering objects.
ren = vtk.vtkRenderer()
ren.AddActor(actor)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
renWin.Render()
iren.Start()
|
CMUSV-VisTrails/WorkflowRecommendation
|
examples/vtk_examples/DataManipulation/Arrays.py
|
Python
|
bsd-3-clause
| 2,656
|
[
"VTK"
] |
bdc30db2d7cb8fb09f6649ddee8d307efe2d512b59f34fde90261473dcdf202a
|
from typing import Callable, Iterable, List, Sequence, Tuple, Union, cast
import numpy as np
import tensorflow as tf
from gpflow.base import TensorType
from gpflow.quadrature.deprecated import mvhermgauss
def ndiagquad(
funcs: Union[Callable[..., tf.Tensor], Iterable[Callable[..., tf.Tensor]]],
H: int,
Fmu: Union[TensorType, List[TensorType], Tuple[TensorType, ...]],
Fvar: Union[TensorType, List[TensorType], Tuple[TensorType, ...]],
logspace: bool = False,
**Ys: TensorType,
) -> tf.Tensor:
"""
Computes N Gaussian expectation integrals of one or more functions
using Gauss-Hermite quadrature. The Gaussians must be independent.
The means and variances of the Gaussians are specified by Fmu and Fvar.
The N-integrals are assumed to be taken wrt the last dimensions of Fmu, Fvar.
:param funcs: the integrand(s):
Callable or Iterable of Callables that operates elementwise
:param H: number of Gauss-Hermite quadrature points
:param Fmu: array/tensor or `Din`-tuple/list thereof
:param Fvar: array/tensor or `Din`-tuple/list thereof
:param logspace: if True, funcs are the log-integrands and this calculates
the log-expectation of exp(funcs)
:param **Ys: arrays/tensors; deterministic arguments to be passed by name
Fmu, Fvar, Ys should all have same shape, with overall size `N`
:return: shape is the same as that of the first Fmu
"""
if isinstance(Fmu, (tuple, list)):
assert isinstance(Fvar, (tuple, list)) # Hint for mypy.
Din = len(Fmu)
def unify(f_list: Sequence[TensorType]) -> tf.Tensor:
"""Stack a list of means/vars into a full block."""
return tf.reshape(
tensor=tf.concat([tf.reshape(f, shape=(-1, 1)) for f in f_list], axis=1),
shape=(-1, 1, Din),
)
shape = tf.shape(Fmu[0])
Fmu, Fvar = map(unify, [Fmu, Fvar]) # both [N, 1, Din]
else:
Din = 1
shape = tf.shape(Fmu)
Fmu, Fvar = [tf.reshape(f, (-1, 1, 1)) for f in [Fmu, Fvar]]
Fmu = cast(TensorType, Fmu)
Fvar = cast(TensorType, Fvar)
xn, wn = mvhermgauss(H, Din)
# xn: H**Din x Din, wn: H**Din
gh_x = xn.reshape(1, -1, Din) # [1, H]**Din x Din
Xall = gh_x * tf.sqrt(2.0 * Fvar) + Fmu # [N, H]**Din x Din
Xs = [Xall[:, :, i] for i in range(Din)] # [N, H]**Din each
gh_w = wn * np.pi ** (-0.5 * Din) # H**Din x 1
for name, Y in Ys.items():
Y = tf.reshape(Y, (-1, 1))
Y = tf.tile(Y, [1, H ** Din]) # broadcast Y to match X
# without the tiling, some calls such as tf.where() (in bernoulli) fail
Ys[name] = Y # now [N, H]**Din
def eval_func(f: Callable[..., tf.Tensor]) -> tf.Tensor:
feval = f(*Xs, **Ys) # f should be elementwise: return shape [N, H]**Din
if logspace:
log_gh_w = np.log(gh_w.reshape(1, -1))
result = tf.reduce_logsumexp(feval + log_gh_w, axis=1)
else:
result = tf.linalg.matmul(feval, gh_w.reshape(-1, 1))
return tf.reshape(result, shape)
if isinstance(funcs, Iterable):
return [eval_func(f) for f in funcs]
return eval_func(funcs)
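# Hedged usage sketch (illustrative, not part of the original module):
# the Gauss-Hermite expectation of f(x) = x**2 under N(0, 1) should be
# close to 1 for every row of the output.
if __name__ == "__main__":
    mu = tf.zeros((5, 1), dtype=tf.float64)
    var = tf.ones((5, 1), dtype=tf.float64)
    print(ndiagquad(lambda X: X ** 2, 20, mu, var))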
|
GPflow/GPflow
|
tests/gpflow/quadrature/ndiagquad_old.py
|
Python
|
apache-2.0
| 3,267
|
[
"Gaussian"
] |
dae5bb228802209e2be16affb704d33fc4f61bd020f9744e2209249a115798eb
|
"""
Create a profile object from a `numpy.ndarray` of data
======================================================
Use the TAMOC ambient module to create profiles in netCDF format for use by
TAMOC from idealized laboratory data. This file demonstrates working with the
data input directly by the user as a `numpy.ndarray`.
This script demonstrates the new version of the `ambient.Profile` object,
which uses `xarray`. For the older version, which used netCDF datasets, see
the script with the same file name but prepended by 'nc'.
Notes
-----
Much of the input data in this script (e.g., columns to extract, column names,
lat and lon location data, date and time, etc.) must be known from the user
(e.g., in this case mostly fictitious) and is hand-coded in the script
text.
Returns
-------
This script generates a `ambient.Profile` object, whose netCDF file is written
to the file::
./Profiles/Profiles/Lab.nc
"""
# S. Socolofsky, July 2013, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from tamoc import ambient
from tamoc import seawater
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
# Create the synthetic temperature and salinity profiles from idealized
# laboratory conditions
z = np.array([0.0, 2.4])
T = np.array([21.0, 20.0]) + 273.15
S = np.array([0.0, 30.0])
data = xr.Dataset()
data.coords['z'] = z
data['temperature'] = (('z'), T)
data['salinity'] = (('z'), S)
# Create an ambient.Profile object for this dataset
lab = ambient.Profile(data)
# Plot the density profile using the interpolation function
z = np.linspace(lab.z_min,
lab.z_max, 250)
rho = np.zeros(z.shape)
tsp = lab.get_values(z, ['temperature', 'salinity', 'pressure'])
for i in range(len(z)):
rho[i] = seawater.density(tsp[i,0], tsp[i,1], tsp[i,2])
fig = plt.figure()
ax1 = plt.subplot(121)
ax1.plot(rho, z)
ax1.set_xlabel('Density (kg/m^3)')
ax1.set_ylabel('Depth (m)')
ax1.invert_yaxis()
ax1.set_title('Computed data')
plt.show()
|
socolofs/tamoc
|
bin/ambient/profile_from_lab.py
|
Python
|
mit
| 2,237
|
[
"NetCDF"
] |
f5130c8b5ae0f2ee5482af1827578fe9e2f4c483daaaa88a66e190a45502b418
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMclust(RPackage):
"""Gaussian Mixture Modelling for Model-Based Clustering, Classification,
and Density Estimation
Gaussian finite mixture models fitted via EM algorithm for model-based
clustering, classification, and density estimation, including Bayesian
regularization, dimension reduction for visualisation, and resampling-based
inference."""
homepage = "http://www.stat.washington.edu/mclust"
url = "https://cloud.r-project.org/src/contrib/mclust_5.3.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/mclust"
version('5.4.7', sha256='45f5a666caee5bebd3160922b8655295a25e37f624741f6574365e4ac5a14c23')
version('5.4.5', sha256='75f2963082669485953e4306ffa93db98335ee6afdc1318b95d605d56cb30a72')
version('5.4.4', sha256='ccc31b0ad445e121a447b04988e73232a085c506fcc7ebdf11a3e0754aae3e0d')
version('5.3', sha256='2b1b6d8266ae16b0e96f118df81559f208a568744a7c105af9f9abf1eef6ba40')
depends_on('r@3.0.0:', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-mclust/package.py
|
Python
|
lgpl-2.1
| 1,236
|
[
"Gaussian"
] |
831e3eb92103a3df23982dc1ad7b8e43edf55e2e4ae63a1e58faa7fe88e29262
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 31 15:47:34 2016
@author: wxt
"""
from __future__ import division
import copy, collections
import numpy as np
from scipy import sparse
import matplotlib
matplotlib.use('agg')
from tadlib.hitad.aligner import BoundSet, DomainSet, DomainAligner, hierFormat, Container
from tadlib.calfea import analyze
from matplotlib.colors import Normalize
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
if self.vmin == self.vmax:
return np.ma.masked_array(np.interp(value, [self.vmin], [0.5]))
if self.vmin < self.midpoint < self.vmax:
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
elif self.vmin >= self.midpoint:
x, y = [self.vmin, self.vmax], [0.5, 1]
elif self.vmax <= self.midpoint:
x, y = [self.vmin, self.vmax], [0, 0.5]
return np.ma.masked_array(np.interp(value, x, y))
np.seterr(divide = "ignore")
class Chrom(object):
"""
*Chrom* is defined to:
- Hold Hi-C data within a certain chromosome
- Identify hierarchical domains in 4 steps: 1.Calculate adaptive DIs.
2.Identify original candidate bounds by 5-state Gaussian mixture Hidden
Markov Model using adaptive DIs as input. 3.Select TAD bounds from
candidate bounds. 4.Recursively identify inner domain structures of each TAD.
- Visualize any region of the chromosome. Hierarchical domains will be
plotted as boxes along with the diagonal of the heatmap, and adaptive
DI track will be placed on top of the heatmap.
Parameters
----------
chrom : str
Chromosome label.
res : int
Resolution of the Hi-C data in base-pair unit.
hicdata : CSR sparse matrix
Bin-level Hi-C matrix of the specified chromosome.
Attributes
----------
chrom : str
Chromosome label.
res : int
Resolution in base-pair unit.
chromLen : int
Total bin number of the chromosome.
rawMatrix : sparse matrix in Compressed Sparse Row format
CSR sparse matrix is used to extract Hi-C data by slicing conveniently
while guarantee low memory overhead.
"""
defaultwindow = 2000000
minsize = 5
def __init__(self, chrom, res, hicdata):
self.chrom = chrom
self.res = res
self._rm = 1
self._dw = self.defaultwindow // res
self.chromLen = hicdata.shape[0]
self.hmm = None
x, y = hicdata.nonzero()
mat_ = hicdata[x, y]
if isinstance(mat_, np.matrix):
IF = np.array(mat_).ravel()
else:
# mat_ is a sparse matrix
IF = np.array(mat_.todense()).ravel()
IF[np.isnan(IF)] = 0
self.rawMatrix = self._genSparseMatrix(x, y, IF)
del x, y, IF, hicdata
self._state = 'Submitted'
def _genSparseMatrix(self, x, y, IF):
        extendLen = 3 * self.chromLen  # data shifted by chromLen, with chromLen padding on each side
rawMatrix = sparse.csr_matrix((IF, (x + self.chromLen, y + self.chromLen)),
shape = (extendLen, extendLen))
return rawMatrix
def detectPeaks(self, trends, mph=0, mpd=5):
"""
Detect peaks (local maxima) in a 1-D array intuitively (a peak must
be greater than its immediate neighbors).
Parameters
----------
trends : 1-D numpy ndarray
Data.
mph : float
Only peaks that are greater than this value will be detected.
(Default: 0)
mpd : positive integer
Only peaks whose indices are at least separated by this value will
be reported. (Default: 5)
Returns
-------
ind : 1-D numpy ndarray
Indices of peaks detected in *trends*.
"""
dx = trends[1:] - trends[:-1]
ind = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
sp = np.where(trends==1)[0]
if sp.size:
ind = np.r_[sp[-1], ind]
if dx[-1] > 0:
ind = np.r_[ind, trends.size-1]
# Filter peaks by mph
if ind.size:
ind = ind[trends[ind] > mph]
# Remove small peaks closer than mpd
if ind.size and mpd > 1:
ind = ind[np.argsort(trends[ind])][::-1]
idel = np.zeros(ind.size, dtype = bool)
for i in range(ind.size):
if not idel[i]:
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (trends[ind[i]] > trends[ind])
idel[i] = 0
ind = np.sort(ind[~idel])
return ind
def randomCheck(self, seq, pthre = 0.05):
"""
We use chi square test to test the randomness of a sequence by
looking at the conversion frequency between neighbors in the sequence.
Parameters
----------
seq : str
A string containing only '1' or '0'. (e.g. '101000101')
pthre : float, 0-1
Significance level of the hypothesis tests.
Returns
-------
reject : bool
True if we should reject the null hypothesis (the sequence is
generated randomly) under the selected significance level.
"""
from scipy.stats import chisquare
pairwise = zip(seq[1:], seq[:-1])
d = collections.defaultdict(int)
for k in pairwise:
d[k] += 1
for k in [('0','0'), ('0','1'), ('1','0'), ('1','1')]:
if not k in d:
d[k] = 0
obs = np.array(list(d.values()))
exp = np.ones_like(obs) * obs.mean()
_, pval = chisquare(obs, exp)
reject = pval<=pthre
return reject
def oriWindow(self, P):
"""
Estimate the most appropriate window size for current bin to best
capture the local interaction bias direction.
See Also
--------
tadlib.hitad.chromLev.Chrom.detectPeaks : detect peaks given a 1-D array
tadlib.hitad.chromLev.Chrom.randomCheck : randomness test for a two-valued
(0-1) sequence
"""
noise = P == 0
check = noise[:20]
noiselevel = check.sum() / check.size
if noiselevel > 0.6:
return 0
indices = np.arange(1, P.size+1)
m = [P < 0, # Downstream bias
P > 0] # Upstream bias
trends_1 = m[0].cumsum().astype(float) / np.arange(1, m[0].size + 1)
trends_2 = m[1].cumsum().astype(float) / np.arange(1, m[1].size + 1)
inds = [self.detectPeaks(trends_1, 0.5, 5),
self.detectPeaks(trends_2, 0.5, 5)]
pool = {}
for i in [0, 1]:
for p in inds[i]:
pool[p] = i
for p in sorted(pool):
seq = ''.join([str(int(i)) for i in m[pool[p]][:(p+1)]])
tmp = indices[p]+self._rm+1
if tmp >= self.minsize: # hasn't been systematically tested
if self.randomCheck(seq):
return tmp
return self._dw
def minWindows(self, start, end, maxw):
"""
Estimate best window size for each bin of a given range.
Parameters
----------
start, end : int
Specify range of the bin indices.
maxw : int
Maximum allowable window size.
Attributes
----------
windows : 1-D numpy.ndarray, int32
See Also
--------
tadlib.hitad.chromLev.Chrom.oriWindow : Window size estimation for
a single bin.
"""
start += self.chromLen; end += self.chromLen
self.windows = np.zeros(end - start, dtype = np.int32)
for i in range(start, end):
down = self.rawMatrix[i, i:(i+maxw)].toarray().ravel()
up = self.rawMatrix[(i-maxw+1):(i+1), i].toarray().ravel()[::-1]
down[:self._rm+1] = 0; up[:self._rm+1] = 0
diff = up - down
ws = self.oriWindow(diff[self._rm+1:])
self.windows[i-start] = ws
def calIS(self, idx, w=10):
idx = idx + self.chromLen
sub = self.rawMatrix[idx-w:idx, idx+1:idx+w+1]
return sub.mean()
def preciseBound(self, byregion):
for r in byregion:
tmp = byregion[r]
for d in tmp:
si = d[0]//self.res
ini = np.inf
for i in range(max(si-1,0), min(si+2,self.chromLen)):
IS = self.calIS(i)
if IS < ini:
d[0] = i * self.res
ini = IS
ei = d[1]//self.res
ini = np.inf
for i in range(max(ei-1,0), min(ei+2,self.chromLen)):
IS = self.calIS(i)
if IS < ini:
d[1] = i * self.res
ini = IS
def calDI(self, windows, start):
"""
Calculate Directionality Index (DI) for each bin with adaptive
window size.
Parameters
----------
windows : 1-D numpy.ndarray, int32
Returned by :py:meth:`tadlib.hitad.chromLev.Chrom.minWindows`.
start : int
Starting bin index, the window size of which is taken from the
1st place of *windows*.
Attributes
----------
DIs : 1-D numpy ndarray, float
Calculated adaptive DI array, which has the same size as the
input *windows*.
"""
start = start + self.chromLen
self.DIs = np.zeros(len(windows))
for i in range(start, start + len(windows)):
w = windows[i-start]
if w == 0:
w = self._dw
down = self.rawMatrix[i, i:(i+w)].toarray().ravel()
up = self.rawMatrix[(i-w+1):(i+1), i].toarray().ravel()[::-1]
down = down[self._rm+1:]; up = up[self._rm+1:]
tmp = self._binbias(up, down)
if tmp != 0:
self.DIs[i-start] = tmp
else:
if w < self._dw:
w = self._dw
down = self.rawMatrix[i, i:(i+w)].toarray().ravel()
up = self.rawMatrix[(i-w+1):(i+1), i].toarray().ravel()[::-1]
down = down[self._rm+1:]; up = up[self._rm+1:]
self.DIs[i-start] = self._binbias(up, down)
# trim outliers
lthre = np.percentile(self.DIs[self.DIs<0], 0.1)
hthre = np.percentile(self.DIs[self.DIs>0], 99.9)
self.DIs[self.DIs<lthre] = lthre
self.DIs[self.DIs>hthre] = hthre
def _binbias(self, up, down):
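        # (Hedged note) This computes a Welch-style two-sample statistic:
        #   bias = (mean(up) - mean(down)) / sqrt(SE_up**2 + SE_down**2)
        # where SE_* is the standard error of each mean.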
bias = 0
zeromask = (up != 0) & (down != 0)
if zeromask.sum() >= 5:
up = up[zeromask]; down = down[zeromask]
if up.size <= 1:
return bias
upmean = up.mean(); downmean = down.mean()
SD_1 = np.sum((up - upmean) ** 2) / (up.size * (up.size - 1))
SD_2 = np.sum((down - downmean) ** 2) / (down.size * (down.size - 1))
SD_pool = np.sqrt(SD_1 + SD_2)
if SD_pool != 0:
bias = (upmean - downmean) / SD_pool
return bias
def splitChrom(self, DIs):
"""
Split a chromosome into gap-free regions. HMM learning and domain
identification procedures will be performed on these regions separately.
Parameters
----------
DIs : 1-D numpy ndarray, float
Adaptive DI array of the whole chromosome. Generally, we detect
runs of zeros in the array as gaps, which will be cut off the
chromosome, making entire chromosome pieces of gap-free regions.
Attributes
----------
chromRegions : dict, {(start,end):DIs[start:end]}
The keys are gap-free regions, and the values are corresponding
adaptive DI pieces.
gapbins : set
Set of bins (in base-pair unit) located in gap regions.
"""
# minregion and maxgaplen are set intuitively
maxgaplen = max(100000 // self.res, 5)
minregion = maxgaplen * 2
valid_pos = np.where(DIs != 0)[0]
gapsizes = valid_pos[1:] - valid_pos[:-1]
endsIdx = np.where(gapsizes > (maxgaplen + 1))[0]
startsIdx = endsIdx + 1
chromRegions = {}
for i in range(startsIdx.size - 1):
start = valid_pos[startsIdx[i]]
end = valid_pos[endsIdx[i + 1]] + 1
if end - start > minregion:
chromRegions[(start, end)] = DIs[start:end]
if startsIdx.size > 0:
start = valid_pos[startsIdx[-1]]
end = valid_pos[-1] + 1
if end - start > minregion:
chromRegions[(start, end)] = DIs[start:end]
start = valid_pos[0]
end = valid_pos[endsIdx[0]] + 1
if end - start > minregion:
chromRegions[(start, end)] = DIs[start:end]
if not startsIdx.size:
if valid_pos.size > 0:
start = valid_pos[0]
end = valid_pos[-1]
if end - start > minregion:
chromRegions[(start, end)] = DIs[start:end]
gapmask = np.ones(DIs.size, bool)
for r in chromRegions:
gapmask[r[0]:r[1]] = 0
gapbins = set(np.where(gapmask)[0]*self.res)
self.regionDIs, self.gapbins = chromRegions, gapbins
def viterbi(self, seq):
"""
Find the most likely hidden state series using the viterbi algorithm.
Parameters
----------
        seq : 1-D numpy ndarray, float
Adaptive DI array for any region.
Returns
-------
path : list
List of hidden state labels. Has the same length as the input
*seq*.
"""
path = [int(s.name) for i, s in self.hmm.viterbi(seq)[1][1:-1]]
return path
def _getBounds(self, path, junctions=['30']):
"""
        Call boundary sites from hidden state series. By default, the
        state transition "3 -> 0" is detected as a boundary.
Parameters
----------
path : list
Hidden state series returned by :py:meth:`tadlib.hitad.chromLev.Chrom.viterbi`.
junctions : list of strings
Boundary definitions by using state transition modes.
            (Default: ['30'])
Returns
-------
bounds : 1-D numpy ndarray, int
Detected boundary positions in ascending order. 0 and len(*path*)
will always be included.
"""
pathseq = ''.join(map(str, path))
pieces = [pathseq]
for junc in junctions:
gen = []
for seq in pieces:
tmp = seq.split(junc)
if len(tmp) == 1:
gen.extend(tmp)
else:
gen.append(tmp[0]+junc[0])
for s in tmp[1:-1]:
gen.append(junc[1]+s+junc[0])
gen.append(junc[1]+tmp[-1])
pieces = gen
bounds = np.r_[0, np.r_[list(map(len, pieces))]].cumsum()
return bounds
def pipe(self, seq, start):
"""
Transform an observed sequence into a list of domains.
Parameters
----------
        seq : 1-D numpy ndarray, float
Adaptive DI array for any region.
start : int
Chromosome bin index of the *seq* start.
Returns
-------
domains : list
List of domains in the format ``[start bin, end bin, noise level,
hierarchical level]``.
See Also
--------
tadlib.hitad.chromLev.Chrom.refNoise : Calculate the noise level of
a given domain
tadlib.hitad.aligner.BoundSet : where the meanings of the hierarchical
level labels are explained in detail.
"""
# bin-level domain (not base-pair-level domain!)
bounds = self._getBounds(self.viterbi(seq), junctions=['30'])
pairs = [[bounds[i], bounds[i+1]] for i in range(len(bounds)-1)]
domains = []
for b in pairs:
# start, end, noise level, hierarchical level
tmp = [b[0]+start, b[1]+start, 0, 0]
domains.append(tmp)
return domains
def minCore(self, regionDIs):
"""
Output domain list for each gap-free region.
Parameters
----------
regionDIs : dict
Gap-free regions and corresponding adaptive DI arrays.
Returns
-------
minDomains : dict
Gap-free regions and corresponding identified bottom domain list.
Different from :py:meth:`tadlib.hitad.chromLev.Chrom.pipe`, the
start and the end of a domain are in base-pair unit.
"""
tmpDomains = {}
for region in sorted(regionDIs):
seq = regionDIs[region]
domains = self.pipe(seq, region[0])
cr = (region[0]*self.res, region[1]*self.res)
tmpDomains[cr] = []
for domain in domains:
domain[0] = domain[0] * self.res
domain[1] = domain[1] * self.res
domain[2] = self.refNoise(domain)
tmpDomains[cr].append(domain)
minDomains = self._orifilter(tmpDomains)
return minDomains
def getDomainList(self, byregion):
"""
Combine by-region domains into a single list.
Parameters
----------
byregion : dict
The keys are tuples representing gap-free regions of the chromosome,
and the values are corresponding identified domain lists.
Returns
-------
DomainList : list
A merged domain list of all regions
"""
DomainList = []
for region in sorted(byregion):
DomainList.extend(byregion[region])
return DomainList
def _orifilter(self, oriDomains):
"""
Perform size filtering on the input domain lists.
Parameters
----------
oriDomains : dict
The keys are tuples representing gap-free regions of the chromosome,
and the values are corresponding identified domain lists. Start
and end of the domain should be in base-pair unit.
Returns
-------
filtered : dict
Pairs of gap-free regions and corresponding filtered domain lists.
"""
filtered = {}
for region in oriDomains:
tmplist = []
for d in oriDomains[region]:
if d[1] - d[0] >= (self.minsize*self.res):
tmplist.append(d)
if len(tmplist):
filtered[region] = tmplist
return filtered
def iterCore(self, minDomains, tmpDomains):
"""
Calculate the mismatch ratio for the input two domain lists. Return
1 if *minDomains* is empty.
:py:meth:`tadlib.hitad.chromLev.Chrom.oriIter` uses this method to
determine whether to break the iteration.
Parameters
----------
minDomains : dict
Target domains calculated by the last loop.
tmpDomains : dict
Query domains returned by the current loop.
Returns
-------
tol : float
Mismatch ratio.
"""
tmplist = self.getDomainList(copy.deepcopy(minDomains))
reflist = []
for refd in tmplist:
reflist.append([self.chrom, refd[0], refd[1], 0])
if not len(reflist):
tol = 1
else:
tmplist = self.getDomainList(copy.deepcopy(tmpDomains))
alignlist = []
for alignd in tmplist:
alignlist.append([self.chrom, alignd[0], alignd[1], 0])
Ref = DomainSet('ref', reflist, self.res)
Align = DomainSet('align', alignlist, self.res)
worker = DomainAligner(Ref, Align)
worker.align('ref','align')
count = len(worker.conserved('ref','align'))
tol = 1 - count / len(Ref.Domains)
return tol
def oriIter(self, minDomains):
"""
        Iteratively approximate adaptive window sizes and return the final
bottom domain list which will be used in subsequent procedures. For
each loop, window sizes are updated according to the latest bottom
domains and next loop will re-run the identification pipeline using
new window sizes. The iteration terminates if domains between two
consecutive loops are very similar (estimated by
:py:meth:`tadlib.hitad.chromLev.Chrom.iterCore`).
Parameters
----------
minDomains : dict
Initial domains served as the target domain list for
:py:meth:`tadlib.hitad.chromLev.Chrom.iterCore` at the first
iteration. We set it empty in our pipeline.
Attributes
----------
minDomains : dict
The keys are tuples representing gap-free regions of the chromosome,
and the values are corresponding identified bottom domain lists.
Start and end of the domain are in base-pair unit.
See Also
--------
tadlib.hitad.chromLev.Chrom.calDI : calculate DI values according to
input window sizes
tadlib.hitad.chromLev.Chrom.iterCore : estimate the degree of divergence
between two domain lists
"""
for n_i in range(5):
tmpDomains = self.minCore(self.regionDIs)
tol = self.iterCore(minDomains, tmpDomains)
minDomains = tmpDomains
for region in minDomains:
for d in minDomains[region]:
ds = d[0]//self.res; de = d[1]//self.res
Len = de - ds
ws = np.max(np.r_['0,2,1', np.arange(1,Len+1), np.arange(Len,0,-1)],
axis=0)
self.windows[ds:de] = ws
self.calDI(self.windows, 0)
self.splitChrom(self.DIs)
if tol < 0.05:
break
self.minDomains = minDomains
def callDomains(self):
"""
Direct API for our hierarchical domain identification pipeline:
- Adaptively estimate window size for each bin.
(:py:meth:`tadlib.hitad.chromLev.Chrom.minWindows`)
- Calculate adaptive DIs. (:py:meth:`tadlib.hitad.chromLev.Chrom.calDI`)
- Iteratively correct adaptive window size and bottom boundary positions.
(:py:meth:`tadlib.hitad.chromLev.Chrom.oriIter`)
"""
self.minWindows(0, self.chromLen, self._dw)
self.calDI(self.windows, 0)
self.splitChrom(self.DIs)
self.oriIter({})
#self.preciseBound(self.minDomains)
self.domains = self.getDomainList(self.minDomains)
self._state = 'Completed'
def getSelfMatrix(self, start, end):
"""
Return the contact matrix of any given region.
Parameters
----------
start, end : int
The region interval in base-pair unit.
Returns
-------
Matrix : 2-D numpy ndarray, float
Sub contact matrix.
"""
startidx = start // self.res + self.chromLen
endidx = end // self.res + self.chromLen
Matrix = self.rawMatrix[startidx:endidx, startidx:endidx].toarray()
x, y = np.nonzero(Matrix)
Matrix[y, x] = Matrix[x, y]
return Matrix
def refNoise(self, domain):
"""
Return noise level of a domain, which is simply defined as the zero
entry ratio of the contact matrix.
"""
if domain[1] - domain[0] < self.res*self.minsize:
return 1
matrix = self.getSelfMatrix(domain[0], domain[1])
total = matrix.size - np.arange(len(matrix), len(matrix)-self._rm-1, -1).sum()*2 +\
len(matrix)
if total < 5:
return 1
else:
nx, ny = np.nonzero(matrix)
mask = np.abs(ny-nx) > self._rm
sigNum = mask.sum() # Number of nonzero entries apart from diag
return 1-sigNum/total
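# Hedged usage sketch (illustrative only; not part of the original module).
# Assuming `hicdata` is a bin-level scipy CSR matrix for chromosome "1" at
# 40 kb resolution, and that a trained HMM is attached by the caller:
#
#     chrom = Chrom("1", 40000, hicdata)
#     chrom.hmm = trained_hmm  # hypothetical, produced elsewhere
#     chrom.callDomains()
#     for start, end, noise, level in chrom.domains:
#         print(start, end, noise, level)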
|
XiaoTaoWang/TADLib
|
tadlib/domaincaller/chromLev.py
|
Python
|
gpl-3.0
| 25,346
|
[
"Gaussian"
] |
9f4488530b541264e511e86cf136f9736be8466cd7c6188cb6ff0e1543643f31
|
@given(u'I have validated a personal URN')
def step_impl(context):
context.execute_steps(u'''
When I visit "plea/enter_urn/"
And I submit a valid URN
And I fill in "number_of_charges" with "2"
And I fill in correct date of birth
And I press "Continue"
''')
@given(u'I have validated a personal welsh URN')
def step_impl(context):
context.execute_steps(u'''
When I visit "plea/enter_urn/"
And I submit a valid welsh URN in english
And I fill in "number_of_charges" with "1"
And I fill in correct date of birth
And I press "Continue"
''')
@given(u'I have validated a company URN')
def step_impl(context):
context.execute_steps(u'''
When I visit "plea/enter_urn/"
And I submit a valid URN as company
And I submit 1 charge and correct postcode
''')
@given(u'I have submitted my personal information')
def step_impl(context):
context.execute_steps(u'''
When I enter my name and contact details
And I confirm my address as correct
And I don't provide National Insurance number
And I provide a reason for not having a National Insurance number
And I don't provide UK driving licence number
And I press "Continue"
''')
@given(u'I have pleaded guilty to both charges')
def step_impl(context):
context.execute_steps(u'''
When I plea guilty, and choose not to attend court
When I plea guilty, and choose not to attend court
''')
@given(u'I have submitted my employment details')
def step_impl(context):
context.execute_steps(u'''
When I submit my employment status as "Employed"
And I submit my home pay amount
When I choose no hardship
And I press "Continue"
''')
|
ministryofjustice/manchester_traffic_offences_pleas
|
features/steps/steps.py
|
Python
|
mit
| 1,810
|
[
"VisIt"
] |
492c8981bf75b024a46d60043051e887cb890fbb5307a3e186fe50c71b16a578
|
import sys
import math
from optparse import OptionParser, Option, OptionGroup
import scipy
from scipy import random
import numpy
np = numpy
from glue.ligolw import lsctables
from glue.ligolw import utils
from glue.ligolw import ligolw
from glue.ligolw import ilwd
#from glue.segments import segment
from glue.lal import LIGOTimeGPS as GPS
from glue.ligolw.utils import process
#from pylal.antenna import response
from minke.antenna import response
import scipy.signal as signal
import scipy.interpolate as interp
import os.path
from scipy import random
import lal
import lalburst
import lalsimulation
lalsim = lalsimulation
from minke.distribution import *
try:
import tkinter as tk
except ImportError:
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
if "numrel_data" in lsctables.SimBurstTable.validcolumns.keys():
NROK = True
else:
NROK = False
class Waveform(object):
"""
Generic container for different source types.
Currently, it checks for the waveform type and initializes itself appropriately.
In the future, different sources should subclass this and override the generation routines.
"""
table_type = lsctables.SimBurstTable
sim = lsctables.New(table_type)
numrel_data = []
waveform = "Generic"
expnum = 1
params = {}
def _clear_params(self):
self.params = {}
for a in self.table_type.validcolumns.keys():
self.params[a] = None
def __getattr__(self, name):
if name in self.params:
return self.params[name]
else:
raise ValueError(f"The parameter {name} isn't located in this object.")
def generate_tail(self, sampling=16384.0, length = 1, h_max = 1e-23, h_min = 0):
"""Generate a "low frequency tail" to append to the end of the
waveform to overcome problems related to memory in the
waveform.
This code was adapted from an iPython notebook provided by
Marek Szczepanczyk.
The tail needs to be added to the waveform after all of the
other corrections have been applied (DW: I think)
Parameters
----------
sampling : float
The sample rate of the injection data. By default this is 16384 Hz, which is the standard advanced LIGO sampling rate.
length : float
The length of the tail to be added, in seconds; defaults to 1.
h_max : float
            The strain at the beginning of the tail -- the strain at the end of the NR data.
        h_min : float
            The strain at the end of the tail; defaults to 0.
Notes
-----
* TODO Confirm that the tail is added-on after the waveform is
convolved with the antenna pattern.
"""
        times = np.linspace(0, length, int(length * sampling))
tail_f = 1.0 / length / 2.0 # Calculate the frequency for a half cosine function over the length of the tail
tail = 0.5 * (h_max + (h_max-h_min) * np.cos( 2 * np.pi * tail_f * times) + h_min)
tailout = lal.CreateREAL8Vector(len(tail))
tailout.data = tail
return tailout
def parse_polarisation(self, polarisation):
"""
Convert a string description of a polarisation to an ellipse eccentricity and an ellipse angle.
Parameters
----------
polarisation : str, {'linear', 'circular', 'elliptical', 'inclination'}
The description of the polarisation, in words.
        Returns
-------
e : float
The ellipse's eccentricity.
angle : float
The ellipse angle.
"""
if polarisation == "linear":
pol_ellipse_e = 1.0
pol_ellipse_angle = 0
elif polarisation == "circular":
pol_ellipse_e = 0.0
pol_ellipse_angle = 0
elif polarisation == "elliptical":
pol_ellipse_e = uniform_interval((0,1),1)[0]
pol_ellipse_angle = uniform_interval((0,2*numpy.pi),1)[0]
elif polarisation == "inclination":
cosincl = uniform_interval((-1, 1), 1)[0]**2
pol_ellipse_e = (1 - cosincl) / (1 + cosincl)
pol_ellipse_angle = -numpy.pi/2 if uniform_interval((0, 1), 1)[0] < 0.5 else numpy.pi/2
return pol_ellipse_e, pol_ellipse_angle
def plot(self, figsize=(10,5),):
"""
Produce a plot of the injection.
"""
hp, hx, _, _ = self._generate(half=True)
f, ax = plt.subplots(1,2, figsize=figsize)
times = np.arange(0, hp.deltaT*len(hp.data.data), hp.deltaT)
ax[0].plot(times, hp.data.data, label="+ polarisation")
ax[0].plot(times, hx.data.data, label="x polarisation")
ax[1].plot(hp.data.data, hx.data.data)
return f
def _generate(self, rate=16384.0, half=False, distance=None):
"""
Generate the burst described in a given row, so that it can be
measured.
Parameters
----------
rate : float
The sampling rate of the signal, in Hz.
Defaults to 16384.0Hz
half : bool
Only compute the hp and hx once if this is true;
these are only required if you need to compute the cross
products. Defaults to False.
Returns
-------
hp :
The strain in the + polarisation
hx :
The strain in the x polarisation
hp0 :
A copy of the strain in the + polarisation
hx0 :
A copy of the strain in the x polarisation
"""
burstobj = self._burstobj()
hp, hx = lalburst.GenerateSimBurst(burstobj, 1.0/rate)
if not half :
hp0, hx0 = lalburst.GenerateSimBurst(burstobj, 1.0/rate)
else:
hp0, hx0 = hp, hx
return hp, hx, hp0, hx0
def _burstobj(self):
"""
Generate a SimBurst object for this waveform.
"""
swig_row = self._row()
burstobj = lalburst.CreateSimBurst()
for a in self.table_type.validcolumns.keys():
try:
setattr(burstobj, a, getattr(swig_row,a))
except AttributeError:
continue
except TypeError:
continue
burstobj.waveform = str(self.waveform)
if NROK:
if swig_row.numrel_data:
burstobj.numrel_data = str(swig_row.numrel_data)
else:
burstobj.numrel_data = str("")
return burstobj
def _generate_for_detector(self, ifos, sample_rate = 16384.0, nsamp = 2000):
data = []
# Loop through each interferometer
for ifo in ifos:
# Make the timeseries
row = self._row()
h_resp = lal.CreateREAL8TimeSeries("inj time series", lal.LIGOTimeGPS(0,0), 0, 1.0/sample_rate, lal.StrainUnit, nsamp)
hp, hx = self._generate(half=True)[:2]
# Get and apply detector response
det = lalsimulation.DetectorPrefixToLALDetector(ifo)
h_tot = lalsimulation.SimDetectorStrainREAL8TimeSeries(hp, hx, row.ra, row.dec, row.psi, det)
# Inject the waveform into the overall timeseries
lalsimulation.SimAddInjectionREAL8TimeSeries(h_resp, h_tot, None)
return h_tot
def _row(self, sim=None, slide_id=0):
"""
Produce a simburst table row for this waveform.
Parameters
----------
sim : table
The table which the row should be made for.
If this is left empty the table is assumed to be a
sim_burst_table.
slide_id : int
The timeslide id. Defaults to 0.
"""
if not sim: sim = self.sim
row = sim.RowType()
for a in self.table_type.validcolumns.keys():
setattr(row, a, self.params[a])
if NROK:
if self.numrel_data:
row.numrel_data = str(self.numrel_data)
else:
row.numrel_data = self.params['numrel_data']
row.waveform = self.waveform
# Fill in the time
row.set_time_geocent(GPS(float(self.time)))
# Get the sky locations
if not row.ra:
row.ra, row.dec, row.psi = self.sky_dist()
row.ra = row.ra[0]
row.dec = row.dec[0]
row.psi = row.psi[0]
row.simulation_id = sim.get_next_id()
row.waveform_number = random.randint(0,int(2**32)-1)
### !! This needs to be updated.
row.process_id = "process:process_id:0" #procrow.process_id
row.time_slide_id = ilwd.ilwdchar("time_slide:time_slide_id:%d" % slide_id)
return row
def interpolate(self, x_old, y_old, x_new, method="linear"):
"""
        Convenience function to avoid repeated code
"""
interpolator = interp.interp1d(x_old, y_old, method)
return interpolator(x_new)
class StringCusp(Waveform):
"""
A class to represent a StringCusp injection.
"""
waveform = "StringCusp"
def __init__(self, amplitude, f_max, time, sky_dist=uniform_sky,):
"""A class to represent a SineGaussian ad-hoc waveform.
Parameters
----------
amplitude : float
The amplitude of the injection.
f_max : float
The maximum frequency of the injection.
time : float
The central time of the injection.
sky_dist : func
The function describing the sky distribution which the injections
should be made over. Defaults to a uniform sky.
"""
self._clear_params()
self.sky_dist = sky_dist
self.params['amplitude'] = amplitude
self.params['frequency'] = f_max
self.time = time
class SineGaussian(Waveform):
"""
A class to represent a SineGaussian injection.
"""
waveform = "SineGaussian"
def __init__(self, q, frequency, hrss, polarisation, time, sky_dist=uniform_sky, seed=0):
"""A class to represent a SineGaussian ad-hoc waveform.
Parameters
----------
q : float
The quality factor.
frequency : float
The frequency of the injection.
hrss : float
The strain magnitude of the injection.
polarisation : str {'linear', 'elliptical', 'circular'}
The type of polarisation of the waveform.
time : float
The central time of the injection.
sky_dist : func
The function describing the sky distribution which the injections
should be made over. Defaults to a uniform sky.
seed : int
The random seed used to make the injection time of the waveform.
The default seed is 0.
"""
self._clear_params()
self.sky_dist = sky_dist
self.hrss = self.params['hrss'] = hrss
self.seed = self.params['seed'] = seed
self.frequency = self.params['frequency'] = frequency
self.q = self.params['q'] = q
self.time = time
self.polarisation = polarisation
self.pol_ellipse_e, self.ellipse_angle = self.params['pol_ellipse_e'], self.params['pol_ellipse_angle'] = self.parse_polarisation(self.polarisation)
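# Example usage (hedged sketch, not part of the original module):
#     sg = SineGaussian(q=9.0, frequency=235.0, hrss=1e-22,
#                       polarisation="linear", time=1126259462.0)
#     row = sg._row()  # populate a sim_burst table row for this injection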
class Gaussian(Waveform):
"""
A class to represent a Gaussian injection.
"""
waveform = "Gaussian"
def __init__(self, duration, hrss, time, sky_dist=uniform_sky, seed=0):
"""
A class to represent a Gaussian ad-hoc waveform.
Parameters
----------
duration : float or list
The duration, in seconds, of the Gaussian waveform.
hrss : float or list
The strain magnitude of the injection.
If a float is provided then the hrss will be fixed,
if a list is provided then this will be the
minimum and maximum hrss.
time : float or list
The time period over which the injection should be made. If
a list is given they should be the start and end times, and
the waveform will be produced at some random point in that
time range. If a float is given then the injection will be
made at that specific time.
sky_dist : func
The function describing the sky distribution which the injections
should be made over. Defaults to a uniform sky.
seed : float
The random seed used to make the injection time of the waveform.
The default seed is 0.
"""
self._clear_params()
self.sky_dist = sky_dist
self.params['duration'] = duration
self.params['hrss'] = hrss
self.time = time
self.params['pol_ellipse_e'] = 1.0
self.params['pol_ellipse_angle'] = 0
class WhiteNoiseBurst(Waveform):
"""
A class to represent a WNB injection.
"""
waveform = "BTLWNB"
def __init__(self, duration, bandwidth, frequency, time, hrss=None, egw=None, sky_dist=uniform_sky, seed=0):
"""A class to represent a white-noise burst ad-hoc waveform.
Parameters
----------
duration : float or list
The duration, in seconds, of the WNB.
bandwidth : float or list
The bandwidth, in hertz, of the WNB.
frequency : float or list
The frequency, in hertz, of the WNB.
hrss : float or list
The strain magnitude of the injection.
If a float is provided then the hrss will be fixed, if a
list is provided then this will be the minimum and maximum
hrss. If the hrss is not provided then you should provide
an EGW value instead.
egw : float
The gravitational wave energy.
You should provide this if you do not provide the Hrss.
time : float or list
The time period over which the injection should be made. If
a list is given they should be the start and end times, and
the waveform will be produced at some random point in that
time range. If a float is given then the injection will be
made at that specific time.
sky_dist : func
The function describing the sky distribution which the injections
should be made over. Defaults to a uniform sky.
seed : float
The random seed used to make the injection time of the waveform.
The default seed is 0.
To Do
-----
Add ability to create a WNB by giving the EGW rather than the strain.
Notes
-----
See
http://software.ligo.org/docs/lalsuite/lalsimulation/group___l_a_l_sim_burst__h.html#ga0419dc37e5b83f18cd3bb34722ddac54
for what this calls "under the hood" in LALSuite. There are some important considerations here
with respect to the differing sample rates used at LIGO and VIRGO, and so when creating the WNB it's important that the
        burst is created at a single sample rate, and then resampled appropriately, so that the same waveform is used.
"""
self._clear_params()
self.sky_dist = sky_dist
if hrss:
self.params['hrss'] = hrss
elif egw:
self.params['egw'] = egw
else:
            raise ValueError('You need to provide either an hrss or an egw to produce a WNB waveform')
# The burst group describes WNBs by their lowest frequency, but LALInference wants them at the central frequency,
# so add half the bandwidth to get the central freq
self.params['frequency'] = frequency + bandwidth / 2.0
# We need a minimum window size so that the whole burst can be contained within it,
# so expand the duration if it's too small.
min_len = np.sqrt( 4 * (np.pi**(-2) / bandwidth**2) )
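        # (Hedged note) min_len simplifies to 2 / (np.pi * bandwidth); the
        # duration is padded up to this so the whole burst fits the window.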
if duration < min_len:
self.params['duration'] = min_len + 1e-5
else:
self.params['duration'] = duration
self.params['bandwidth'] = bandwidth
self.time = time
self.params['pol_ellipse_e'], self.params['pol_ellipse_angle'] = 0.0, 0.0
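        # (Hedged note) the expression below assumes an hrss value was
        # supplied; the egw-only path is still a TODO (see the notes above).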
self.params['egw_over_rsquared'] = hrss**2 * np.pi**2 * frequency**2 * lal.C_SI / lal.G_SI / lal.MSUN_SI * lal.PC_SI**2
# I'm really not sure if we need to do this, but apparently the
# hrss of the actual waveform is not exactly what we ask for
# the old pyBurst code measured this by generating the waveform
# which seems wasteful, but I'll replicate it here anyway, for
# consistency with the method used for O1.
hp, hx, _, _ = self._generate(half=True)
self.params['hrss'] = lalsimulation.MeasureHrss(hp, hx)
class Numerical2Column(Waveform):
"""
A superclass to handle ninja-based numerical relativity waveforms.
"""
waveform = "Numerical" # We shouldn't ever use this anyway
supernova = False
extraction = None
def _make_strain(self, sample_rate=16384):
"""
Calculate the physical strain and time values which correspond to the natural unit
values in the data file.
Parameters
----------
        sample_rate : float
            The desired sample rate for the waveform.
            The distance (in megaparsecs) and total mass (in solar masses)
            are taken from the ``distance`` and ``total_mass`` attributes.
Notes
-----
At the moment this only works for files with h+ and hx in the columns.
"""
data = np.copy(self.data)
time_scale = (self.total_mass * lal.MSUN_SI * (lal.G_SI / lal.C_SI**3 ))
mass_geo = (self.total_mass * lal.MSUN_SI * (lal.G_SI / lal.C_SI**2 ))
distance_geo = (self.distance * 1e6 * lal.PC_SI )#* (lal.C_SI**2/lal.G_SI))
strain_scale = (distance_geo) / (mass_geo) #(self.total_mass* lal.MSUN_SI)
if self.extraction:
strain_scale /= (self.extraction)
data[:,0] *= time_scale
data[:, 1:] /= strain_scale
times = data[:,0]
target_times = np.arange(times[0], times[-1], 1./sample_rate)
output = np.zeros((len(target_times), 3))
output[:,0] = target_times
output[:,1] = self.interpolate(times, data[:,1], target_times)
output[:,2] = self.interpolate(times, data[:,2], target_times)
return output
def _generate(self, epoch="0.0", rate=16384.0, half=False, tail = True):
"""
Generate the burst described in a given row, so that it can be
measured.
Parameters
----------
rate : float
The sampling rate of the signal, in Hz.
Defaults to 16384.0Hz
half : bool
Only compute the hp and hx once if this is true;
these are only required if you need to compute the cross
products. Defaults to False.
epoch : str
The signal injection epoch.
This should be given as a string, which will then be
split at the decimal to preserve precision.
Returns
-------
hp :
The strain in the + polarisation
hx :
The strain in the x polarisation
hp0 :
A copy of the strain in the + polarisation
hx0 :
A copy of the strain in the x polarisation
"""
epoch_sec, epoch_ms = list(map(int, epoch.split(".")))
data = self._make_strain(rate)
nsamp = len(data)
hp = lal.CreateREAL8TimeSeries("inj time series", lal.LIGOTimeGPS(epoch_sec,epoch_ms), 0, 1.0/rate, lal.StrainUnit, nsamp)
hx = lal.CreateREAL8TimeSeries("inj time series", lal.LIGOTimeGPS(epoch_sec,epoch_ms), 0, 1.0/rate, lal.StrainUnit, nsamp)
hp.data.data = data[:,1]
hx.data.data = data[:,2]
return hp, hx, np.copy(hp), np.copy(hx)
def interpolate(self, x_old, y_old, x_new):
"""
        Convenience function to avoid repeated code
"""
interpolator = interp.interp1d(x_old, y_old)
return interpolator(x_new)
class Hyperbolic(Numerical2Column):
def __init__(self, datafile, total_mass, distance, extraction, sky_dist=uniform_sky, **kwargs):
"""
A class to represent a hyperbolic or parabolic encounter waveform.
total_mass : float
The total mass of the system in solar masses.
distance : float
The distance, in megaparsecs, at which the waveform should be produced.
extraction : float
The extraction radius of the waveform.
"""
self._clear_params()
self.data = np.genfromtxt(datafile)
self.total_mass = total_mass
self.distance = distance
self.sky_dist = sky_dist
self.extraction = extraction
self.params.update(kwargs)
class Supernova(Waveform):
"""
    A superclass to handle the spherical harmonic decompositions which
all supernova waveforms require.
"""
waveform = "Supernova" # We shouldn't ever use this anyway
supernova = True
file_distance = 10e-3
has_memory = False
def construct_Hlm(self, Ixx, Ixy, Ixz, Iyy, Iyz, Izz, l=2, m=2):
"""
Construct the expansion parameters Hlm from T1000553. Returns the expansion
parameters for l=2, m=m
"""
if l!=2:
print("l!=2 not supported")
sys.exit()
if abs(m)>2:
print("Only l=2 supported, |m| must be <=2")
sys.exit()
if m==-2:
Hlm = np.sqrt(4*lal.PI/5) * (Ixx - Iyy + 2*1j*Ixy)
elif m==-1:
            Hlm = np.sqrt(16*lal.PI/5) * (Ixz + 1j*Iyz)
elif m==0:
Hlm = np.sqrt(32*lal.PI/15) * (Izz - 0.5*(Ixx + Iyy))
elif m==1:
            Hlm = np.sqrt(16*lal.PI/5) * (-1*Ixz + 1j*Iyz)
elif m==2:
Hlm = np.sqrt(4*lal.PI/5) * (Ixx - Iyy - 2*1j*Ixy)
return Hlm
def _generate(self, rate=16384.0, half=False, distance=None, tail = True):
"""
Generate the burst described in a given row, so that it can be
measured.
Parameters
----------
rate : float
The sampling rate of the signal, in Hz.
Defaults to 16384.0Hz
half : bool
Only compute the hp and hx once if this is true;
these are only required if you need to compute the cross
products. Defaults to False.
Returns
-------
hp :
The strain in the + polarisation
hx :
The strain in the x polarisation
hp0 :
A copy of the strain in the + polarisation
hx0 :
A copy of the strain in the x polarisation
"""
burstobj = self._burstobj()
hp, hx = lalburst.GenerateSimBurst(burstobj, 1.0/rate)
if not half :
hp0, hx0 = lalburst.GenerateSimBurst(burstobj, 1.0/rate)
else:
hp0, hx0 = hp, hx
random.seed(0)
# detrend supernova waveforms
if hasattr(self, "supernova"):
hp.data.data, hx.data.data, hp0.data.data, hx0.data.data = scipy.signal.detrend(hp.data.data), scipy.signal.detrend(hx.data.data), scipy.signal.detrend(hp0.data.data), scipy.signal.detrend(hx0.data.data)
# Rescale for a given distance
if burstobj.amplitude:
rescale = 1.0 / (self.file_distance / burstobj.amplitude)
hp.data.data, hx.data.data, hp0.data.data, hx0.data.data = hp.data.data * rescale, hx.data.data * rescale, hp0.data.data * rescale, hx0.data.data * rescale
if self.has_memory and tail:
# Apply the tail correction for memory
tail_hp = self.generate_tail(length = 1, h_max = hp.data.data[-1], h_min = hp.data.data[0])
tail_hx = self.generate_tail(length = 1, h_max = hx.data.data[-1], h_min = hx.data.data[0])
hp_data = np.append(hp.data.data,tail_hp.data)
hx_data = np.append(hx.data.data,tail_hx.data)
del tail_hp, tail_hx
tail_hp = lal.CreateREAL8Vector(len(hp_data))
tail_hp.data = hp_data
tail_hx = lal.CreateREAL8Vector(len(hx_data))
tail_hx.data = hx_data
hp.data = tail_hp
hx.data = tail_hx
del tail_hp, tail_hx
return hp, hx, hp0, hx0
def generate_tail(self, sampling=16384.0, length = 1, h_max = 1e-23, h_min = 0):
"""Generate a "low frequency tail" to append to the end of the
waveform to overcome problems related to memory in the
waveform.
This code was adapted from an iPython notebook provided by
Marek Szczepanczyk.
The tail needs to be added to the waveform after all of the
other corrections have been applied (DW: I think)
Parameters
----------
sampling : float
The sample rate of the injection data. By default this is 16384 Hz, which is the standard advanced LIGO sampling rate.
length : float
The length of the tail to be added, in seconds; defaults to 1.
h_max : float
            The strain at the beginning of the tail -- the strain at the end of the NR data.
        h_min : float
            The strain at the end of the tail; defaults to 0.
Notes
-----
* TODO Confirm that the tail is added-on after the waveform is
convolved with the antenna pattern.
"""
        times = np.linspace(0, length, int(length * sampling))
tail_f = 1.0 / length / 2.0 # Calculate the frequency for a half cosine function over the length of the tail
tail = 0.5 * (h_max + (h_max-h_min) * np.cos( 2 * np.pi * tail_f * times) + h_min)
tailout = lal.CreateREAL8Vector(len(tail))
tailout.data = tail
return tailout
def interpolate(self, x_old, y_old, x_new):
"""
        Convenience function to avoid repeated code
"""
interpolator = interp.interp1d(x_old, y_old)
return interpolator(x_new)
def decompose(self, numrel_file, sample_rate = 16384.0, step_back = 0.01):
"""
        Produce the spherical harmonic decompositions of a numerical
waveform.
Parameters
----------
numrel_file : str
The location of the numerical relativity waveform file.
sample_rate : float
The sample rate of the NR file. Defaults to 16384.0 Hz.
step_back : float
The amount of time, in seconds, of the data which should be included
before the peak amplitude. Defaults to 0.01 sec.
Returns
-------
decomposition : ndarray
The l=2 mode spherical decompositions of the waveform.
"""
# Load the times from the file
data = np.loadtxt(numrel_file)
data = data.T
times = data[0]
times -= times[0]
# Load the I components from the file
Ixx, Ixy, Ixz, Iyy, Iyz, Izz = data[5:]
        # Make the new time vector for the required sample rate
target_times = np.arange(times[0], times[-1], 1.0/sample_rate)
# Prepare the output matrix
output = np.zeros((len(target_times), 11))
# Add the times in to the first column of said matrix
output[:, 0] = target_times
for i, m in enumerate([-2,-1,0,1,2]):
Hlm = self.construct_Hlm(Ixx, Ixy, Ixz, Iyy, Iyz, Izz, l=2, m=m)
#
            # Resample to uniform spacing at 16384 Hz
#
Hlm_real = self.interpolate(times, Hlm.real, target_times)
Hlm_imag = self.interpolate(times, Hlm.imag, target_times)
#
# Make the output, and rescale it into dimensionless strain values
#
output[:,2*(i+1)-1] = Hlm_real * np.sqrt(lal.G_SI / lal.C_SI**4) #/lal.MRSUN_SI / ( distance * lal.PC_SI * 1e6)
output[:,2*(i+1)] = -Hlm_imag * np.sqrt(lal.G_SI / lal.C_SI**4)#/lal.MRSUN_SI / ( distance * lal.PC_SI * 1e6)
return output
class Ott2013(Supernova):
"""
The Ott+2013 supernova waveform
"""
has_memory = True
waveform = "Ott+13"
def __init__(self, theta, phi, time, sky_dist=uniform_sky, distance = 10e-3, filepath=None, family="s27fheat1p05", decomposed_path=None):
"""
Parameters
----------
phi : float
The internal phi parameter of the supernova injection.
theta : float
The internal inclination parameter of the supernova injection.
time : float or list
The time period over which the injection should be made. If
a list is given they should be the start and end times, and
the waveform will be produced at some random point in that
time range. If a float is given then the injection will be
made at that specific time.
sky_dist : func
The function describing the sky distribution which the injections
should be made over. Defaults to a uniform sky.
distance : float
The distance, in megaparsecs, at which the injection should be made.
filepath : str
The filepath to the folder containing the pre-rotated numerical relativity waveforms.
family : str
The family of waveforms which are to be used for the injection set.
decomposed_path : str
The location where the decomposed waveform file should be stored. Optional.
"""
self._clear_params()
self.time = time
self.params['phi'] = phi
self.params['incl'] = theta
self.sky_dist = sky_dist
#self.params['numrel_data'] = filepath #decomposed_path #self.numrel_data
if not decomposed_path : decomposed_path = filepath+".dec"
if not os.path.isfile(decomposed_path) :
decomposed = self.decompose(filepath, sample_rate = 16384.0, step_back = 0.01)
np.savetxt(decomposed_path, decomposed, header="time (2,-2) (2,-1) (2,0) (2,1) (2,2)", fmt='%.8e')
self.numrel_data = self.params['numrel_data'] = decomposed_path
self.params['amplitude'] = distance # We store the distance in the amplitude column because there isn't a distance column
self.params['hrss'] = self.file_distance # Again the hrss value is the distance at which the files are scaled
class Mueller2012(Supernova):
"""
The Mueller2012 waveform.
"""
waveform = "Mueller+12"
has_memory = True
def __init__(self, theta, phi, time, distance = 10e-3, sky_dist=uniform_sky, filepath=None, family="L15-3", decomposed_path=None):
"""
Parameters
----------
phi : float
The internal phi parameter of the supernova injection.
theta : float
The internal inclination parameter of the supernova injection.
time : float or list
The time period over which the injection should be made. If
a list is given they should be the start and end times, and
the waveform will be produced at some random point in that
time range. If a float is given then the injection will be
made at that specific time.
sky_dist : func
The function describing the sky distribution which the injections
should be made over. Defaults to a uniform sky.
filepath : str
The filepath to the folder containing the pre-rotated numerical relativity waveforms.
family : str
The family of waveforms which are to be used for the injection set.
decomposed_path : str
The location where the decomposed waveform file should be stored. Optional.
"""
self._clear_params()
self.time = time
self.params['phi'] = phi
self.params['incl'] = theta
self.sky_dist = sky_dist
if not decomposed_path : decomposed_path = filepath+".dec"
if not os.path.isfile(decomposed_path) :
decomposed = self.decompose(filepath, sample_rate = 16384.0, step_back = 0.01)
np.savetxt(decomposed_path, decomposed, header="time (2,-2) (2,-1) (2,0) (2,1) (2,2)", fmt='%.8e')
#self.numrel_data = filepath + "/" + family
self.params['numrel_data'] = decomposed_path #self.numrel_data
self.params['amplitude'] = distance # We store the distance in the amplitude column because there isn't a distance column
self.params['hrss'] = self.file_distance # Again the hrss value is the distance at which the files are scaled
def decompose(self, numrel_file, sample_rate = 16384.0, step_back = 0.01):
"""
Produce the spherical harmonic decompositions of a numerical
waveform.
Parameters
----------
numrel_file : str
The location of the numerical relativity waveform file.
sample_rate : float
The sample rate to which the waveform is resampled. Defaults to 16384.0 Hz.
step_back : float
The amount of time, in seconds, of the data which should be included
before the peak amplitude. Defaults to 0.01 sec.
Returns
-------
decomposition : ndarray
The l=2 mode spherical decompositions of the waveform.
"""
# Load the times from the file
data = np.loadtxt(numrel_file)
data = data.T
times = data[1]
times -= times[0]
# Load the I components from the file
Ixx, Iyy, Izz, Ixy, Ixz, Iyz = data[6:]
# Make the new time vector for the required sample rate
target_times = np.arange(times[0], times[-1], 1.0/sample_rate)
# Prepare the output matrix
output = np.zeros((len(target_times), 11))
# Add the times in to the first column of said matrix
output[:, 0] = target_times
for i, m in enumerate([-2,-1,0,1,2]):
Hlm = self.construct_Hlm(Ixx, Ixy, Ixz, Iyy, Iyz, Izz, l=2, m=m)
#
# Resample to uniform spacing at the requested sample rate
#
Hlm_real = self.interpolate(times, Hlm.real, target_times)
Hlm_imag = self.interpolate(times, Hlm.imag, target_times)
#
# Make the output, and rescale it into dimensionless strain values
#
output[:,2*(i+1)-1] = Hlm_real * np.sqrt(lal.G_SI / lal.C_SI**4) #/lal.MRSUN_SI / ( distance * lal.PC_SI * 1e6)
output[:,2*(i+1)] = -Hlm_imag * np.sqrt(lal.G_SI / lal.C_SI**4)#/lal.MRSUN_SI / ( distance * lal.PC_SI * 1e6)
return output
# def _generate(self):
# """
# Generate the Mueller waveforms. This must be performed
# differently to other waveform morphologies, since we require
# the use of pre-generated text files.
# The filepath and the start of the filenames should be provided in
# the numrel_data column of the SimBurstTable, so we need to construct
# the rest of the filename from the theta and phi angles, and then load
# that file.
# """
# theta, phi = self.params['incl'], self.params['phi']
# numrel_file_hp = self.numrel_data + "_costheta{:.3f}_phi{:.3f}-plus.txt".format(theta, phi)
# numrel_file_hx = self.numrel_data + "_costheta{:.3f}_phi{:.3f}-cross.txt".format(theta, phi)
# data_hp = np.loadtxt(numrel_file_hp)
# data_hx = np.loadtxt(numrel_file_hx)
# return data_hp, data_hx, data_hp, data_hx
class Scheidegger2010(Supernova):
"""
The Scheidegger2010 waveform.
"""
waveform = "Scheidegger+10"
def __init__(self, theta, phi, time, distance = 10e-3, sky_dist=uniform_sky, filepath=None, family="R1E1CA_L", decomposed_path=None):
"""
Parameters
----------
phi : float
The internal phi parameter of the supernova injection.
theta : float
The internal inclination parameter of the supernova injection.
time : float or list
The time period over which the injection should be made. If
a list is given they should be the start and end times, and
the waveform will be produced at some random point in that
time range. If a float is given then the injection will be
made at that specific time.
sky_dist : func
The function describing the sky distribution which the injections
should be made over. Defaults to a uniform sky.
filepath : str
The filepath to the folder containing the pre-rotated numerical relativity waveforms.
family : str
The family of waveforms which are to be used for the injection set.
decomposed_path : str
The location where the decomposed waveform file should be stored. Optional.
"""
self._clear_params()
self.time = time
self.params['phi'] = phi
self.params['incl'] = theta
self.sky_dist = sky_dist
if not decomposed_path : decomposed_path = filepath+".dec"
if not os.path.isfile(decomposed_path) :
decomposed = self.decompose(filepath, sample_rate = 16384.0, step_back = 0.01)
np.savetxt(decomposed_path, decomposed, header="time (2,-2) (2,-1) (2,0) (2,1) (2,2)", fmt='%.8e')
#self.numrel_data = filepath + "/" + family
self.params['numrel_data'] = decomposed_path #self.numrel_data
self.params['amplitude'] = distance # We store the distance in the amplitude column because there isn't a distance column
self.params['hrss'] = self.file_distance # Again the hrss value is the distance at which the files are scaled
class Dimmelmeier08(Supernova):
"""
The Dimmelmeier08 waveform.
"""
waveform = "Dimmelmeier+08"
def __init__(self, time, distance = 10e-3, sky_dist=uniform_sky, filepath="signal_s15a2o05_ls.dat", decomposed_path=None, ):
"""
Parameters
----------
time : float or list
The time period over which the injection should be made. If
a list is given they should be the start and end times, and
the waveform will be produced at some random point in that
time range. If a float is given then the injection will be
made at that specific time.
sky_dist : func
The function describing the sky distribution which the injections
should be made over. Defaults to a uniform sky.
filepath : str
The filepath to the numerical relativity waveform.
decomposed_path : str
The location where the decomposed waveform file should be stored. Optional.
"""
self._clear_params()
self.time = time
self.sky_dist = sky_dist
if not decomposed_path : decomposed_path = filepath+".dec"
if not os.path.isfile(decomposed_path) :
decomposed = self.decompose(filepath, sample_rate = 16384.0, step_back = 0.01)
np.savetxt(decomposed_path, decomposed, header="time (2,-2) (2,-1) (2,0) (2,1) (2,2)", fmt='%.8e')
self.params['phi']=0
self.params['incl']=90
self.params['numrel_data'] = decomposed_path#
self.params['amplitude'] = distance # We store the distance in the amplitude column because there isn't a distance column
self.params['hrss'] = self.file_distance # Again the hrss value is the distance at which the files are scaled
def decompose(self, numrel_file, sample_rate = 16384.0, step_back = 0.01):
"""
Produce the spherical harmonic decompositions of the Dimmelmeier numerical
waveform. This is a special case since it is axisymmetric.
Parameters
----------
numrel_file : str
The location of the numerical relativity waveform file.
sample_rate : float
The sample rate to which the waveform is resampled. Defaults to 16384.0 Hz.
step_back : float
The amount of time, in seconds, of the data which should be included
before the peak amplitude. Defaults to 0.01 sec.
Returns
-------
decomposition : ndarray
The l=2 mode spherical decompositions of the waveform.
"""
extract_dist = 10e-3
# Load the times from the file
data = np.loadtxt(numrel_file)
data = data.T
times = data[0]*1e-3
times -= times[0]
# Load the hp components
strain = data[1]
# Make the new time vector for the required sample rate
target_times = np.arange(times[0], times[-1], 1.0/sample_rate)
# Prepare the output matrix
output = np.zeros((len(target_times), 11))
# Add the times in to the first column of said matrix
output[:, 0] = target_times #/ lal.MTSUN_SI
#
# Resample to uniform spacing at the requested sample rate
#
strain_new = self.interpolate(times, strain, target_times)
#
# Make the output, and rescale it into dimensionless strain values
#
output[:,5] = strain_new #/* ( extract_dist * lal.PC_SI * 1.0e6)
return output
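# Because the Dimmelmeier waveform is axisymmetric, only column 5 (the
# real part of the (2,0) mode) is populated above; the other mode columns
# remain zero. A minimal construction sketch (the GPS time is hypothetical):
#
#     dim = Dimmelmeier08(time=1126259462, filepath="signal_s15a2o05_ls.dat")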
class Ringdown(Waveform):
"""
A class to handle Ringdown waveforms.
"""
table_type = lsctables.SimRingdownTable
waveform = "GenericRingdown"
class Yakunin10(Supernova):
"""
The Yakunin10 waveform.
"""
waveform = "Yakunin+10"
def __init__(self, time, distance = 10e-3, sky_dist=uniform_sky, filepath="Yakunin2010/hplus-B12-WH07_tail.txt", decomposed_path=None, ):
"""
Parameters
----------
time : float or list
The time period over which the injection should be made. If
a list is given they should be the start and end times, and
the waveform will be produced at some random point in that
time range. If a float is given then the injection will be
made at that specific time.
sky_dist : func
The function describing the sky distribution which the injections
should be made over. Defaults to a uniform sky.
filepath : str
The filepath to the numerical relativity waveform.
decomposed_path : str
The location where the decomposed waveform file should be stored. Optional.
"""
self._clear_params()
self.params['amplitude'] = distance # We store the distance in the amplitude column because there isn't a distance column
self.params['hrss'] = self.file_distance # Again the hrss value is the distance at which the files are scaled
self.time = time
self.sky_dist = sky_dist
if not decomposed_path : decomposed_path = filepath+".dec"
if not os.path.isfile(decomposed_path) :
decomposed = self.decompose(filepath, sample_rate = 16384.0, step_back = 0.01)
np.savetxt(decomposed_path, decomposed, header="time (2,-2) (2,-1) (2,0) (2,1) (2,2)", fmt='%.8e')
self.params['phi']=0
self.params['incl']=90
self.params['numrel_data'] = decomposed_path
def decompose(self, numrel_file, sample_rate = 16384.0, step_back = 0.01):
"""
Produce the spherical harmonic decompositions of the Yakunin numerical
waveform. This is a special case since it is axisymmetric.
Parameters
----------
numrel_file : str
The location of the numerical relativity waveform file.
sample_rate : float
The sample rate to which the waveform is resampled. Defaults to 16384.0 Hz.
step_back : float
The amount of time, in seconds, of the data which should be included
before the peak amplitude. Defaults to 0.01 sec.
Returns
-------
decomposition : ndarray
The l=2 mode spherical decompositions of the waveform.
"""
extract_dist = 10e-3
# Load the times from the file
data = np.loadtxt(numrel_file)
data = data.T
times = data[0]
times -= times[0]
# Load the hp components
strain = data[1]
# Make the new time vector for the required sample rate
target_times = np.arange(times[0], times[-1], 1.0/sample_rate)
# Prepare the output matrix
output = np.zeros((len(target_times), 11))
# Add the times in to the first column of said matrix
output[:, 0] = target_times #/ lal.MTSUN_SI
#
# Resample to uniform spacing at the requested sample rate
#
strain_new = self.interpolate(times, strain, target_times)
#
# Make the output, and rescale it into dimensionless strain values
#
output[:,5] = strain_new #/* ( extract_dist * lal.PC_SI * 1.0e6)
return output
class LongDuration(Supernova):
"""
A superclass to handle the spherical harmonic decompositions which
long duration numerical relativity bursts may require.
"""
waveform = "LongDuration" # We shouldn't ever use this anyway
supernova = True
class ADI(LongDuration):
"""
Accretion disk instability waveforms which are generated using the method described in
LIGO-T1100093, at https://dcc.ligo.org/LIGO-T1100093. The waveforms are based off a model
by MH van Putten,
M. H. van Putten, A. Levinson, H. K. Lee, T. Regimbau, M. Punturo, and G. M. Harry. Phys. Rev. D., 69(4), 044007, 2004.
M. H. van Putten. Phys. Rev. Lett., 87(9), 091101, 2001.
The waveforms are stored in .mat binary files which can be read-in by SciPy.
"""
waveform = "ADI"
def __init__(self, time, sky_dist=uniform_sky, filepath="stamp_adi_a_tapered.mat", decomposed_path=None, ):
"""
Parameters
----------
time : float or list
The time period over which the injection should be made. If
a list is given they should be the start and end times, and
the waveform will be produced at some random point in that
time range. If a float is given then the injection will be
made at that specific time.
sky_dist : func
The function describing the sky distribution which the injections
should be made over. Defaults to a uniform sky.
filepath : str
The filepath to the numerical relativity waveform.
decomposed_path : str
The location where the decomposed waveform file should be stored. Optional.
"""
self._clear_params()
self.time = time
self.sky_dist = sky_dist
if not decomposed_path : decomposed_path = filepath+".dec"
if not os.path.isfile(decomposed_path) :
decomposed = self.decompose(filepath)
np.savetxt(decomposed_path, decomposed, header="time\thplus\thcross", fmt='%.8e')
#decomposed_path = filepath
self.params['phi']=0
self.params['incl']=90
self.params['numrel_data'] = decomposed_path
def _generate(self, rate = 16384.0, half=False):
data = np.genfromtxt(self.params['numrel_data'])
nsamp = len(data)
hp = lal.CreateREAL8TimeSeries("inj time series", lal.LIGOTimeGPS(0,0), 0, 1.0/rate, lal.StrainUnit, nsamp)
hx = lal.CreateREAL8TimeSeries("inj time series", lal.LIGOTimeGPS(0,0), 0, 1.0/rate, lal.StrainUnit, nsamp)
hp.data.data = data[:,1]
hx.data.data = data[:,2]
return hp, hx, np.copy(hp), np.copy(hx)
def decompose(self, numrel_file, sample_rate = 16384.0, step_back = 0.01):
"""
Resample the ADI waveform onto a uniform time grid. This is a special
case: the ADI files store the plus and cross polarisations directly
rather than spherical harmonic modes.
Parameters
----------
numrel_file : str
The location of the numerical relativity waveform file.
sample_rate : float
The sample rate of the output NR file. Defaults to 16384.0 Hz, and should
be the same as the data rate of the detector.
step_back : float
The amount of time, in seconds, of the data which should be included
before the peak amplitude. Defaults to 0.01 sec.
Returns
-------
decomposition : ndarray
The re-interpolated file at the desired sample rate which is in the
<time hp hc> format which can be accepted by LALSimulation.
"""
from scipy import io
# Load the matlab file
data = io.matlab.loadmat(numrel_file)
comment = data['comment'][0].split(';')
comment_dict = {}
for line in comment:
sp = line.split("=")
comment_dict[sp[0].strip()] = sp[1].strip()
extract_dist = comment_dict['dist']
# We actually want the extract distance as a float of megaparsecs
if extract_dist == "1 Mpc": extract_dist = 1.0
# Load the sample rate of the file from the file
fs = int(data['fs'])
# Determine the end time
start = 0
end = len(data['hp']) * 1.0 / fs
# Make the time array
times = np.arange(start, end, 1.0/fs)
# Make the new time vector for the required sample rate
target_times = np.arange(times[0], times[-1], 1.0/sample_rate)
#print len(target_times)
# Load the hp components
strainp = data['hp'].T[0].astype(np.float32)
strainc = data['hc'].T[0].astype(np.float32)
#del data
# Prepare the output matrix
output = np.zeros((len(target_times), 3))
# Add the times in to the first column of said matrix
output[:, 0] = target_times
#
# Resample to uniform spacing at the requested sample rate
#
strainp_new = self.interpolate(times, strainp, target_times)
strainc_new = self.interpolate(times, strainc, target_times)
#
# Make the output.
#
output[:,1] = strainp_new
output[:,2] = strainc_new
return output
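# The loader above assumes the STAMP-style .mat layout used by these ADI
# files: 'hp' and 'hc' column vectors, a scalar sample rate 'fs', and a
# 'comment' string of ";"-separated "key=value" pairs that includes 'dist'.
# A construction sketch using the default file (the GPS time is hypothetical):
#
#     adi = ADI(time=1126259462, filepath="stamp_adi_a_tapered.mat")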
class BBHRingdown(Ringdown):
"""
A class to represent BBH ringdowns.
"""
#lalsimfunction = SimBlackHoleRingdown
waveform = "BBHRingdown"
def __init__(self, time, phi0, mass, spin, massloss, distance, inclination, sky_dist=uniform_sky):
"""
Binary Black Hole (BBH) Ringdown waveform
Parameters
----------
time : float
The time that the waveform should be generated at, in gps seconds.
phi0 : float
The starting phase.
mass : float
The mass of the final black hole in solar masses.
spin : float
The dimensionless spin parameter for the final black hole.
massloss : float
The total mass loss of the system. (Also denoted epsilon).
distance : float
The effective luminosity distance at which the signal should be generated.
inclination : float
The inclination of the system in degrees.
"""
self._clear_params()
self.time = self.geocent_start_time = self.params['geocent_start_time'] = time
self.sky_dist = sky_dist
self.params['simulation_id'] = self.simulation_id = self.sim.get_next_id()
self.params['phase'] = phi0
self.params['mass'] = mass # in solar masses
self.params['spin'] = spin
self.params['epsilon'] = massloss
self.params['eff_dist_l'] = self.eff_dist_l = distance # megaparsec
self.params['inclination'] = self.inclination = float(inclination)
def _generate(self, rate=16384.0, half=False, l = 2, m = 2):
"""
Generate this BBH Ringdown waveform.
Parameters
----------
rate : float
The signal sampling rate. Defaults to 16384.0 Hz.
l : int
The spherical harmonic degree of the mode to be generated.
m : int
The azimuthal order of the mode to be generated.
half : bool
Only compute the hp and hx once if this is true;
these are only required if you need to compute the cross
products. Defaults to False.
Returns
-------
hp :
The strain in the + polarisation
hx :
The strain in the x polarisation
hp0 :
A copy of the strain in the + polarisation
hx0 :
A copy of the strain in the x polarisation
"""
dt = 1.0 / rate
hp, hx = lalsimulation.SimBlackHoleRingdown(self.params['geocent_start_time'],
self.params["phase"],
dt,
self.params['mass']*lal.MSUN_SI,
self.params['spin'],
self.params['epsilon'],
self.params['eff_dist_l'] * 1e6 * lal.PC_SI,
np.deg2rad(self.params['inclination']),
l, m)
if not half:
hp0, hx0 = lalsimulation.SimBlackHoleRingdown(self.params['geocent_start_time'],
self.params["phase"],
dt,
self.params['mass']*lal.MSUN_SI,
self.params['spin'],
self.params['epsilon'],
self.params['eff_dist_l'] * 1e6 * lal.PC_SI,
np.deg2rad(self.params['inclination']),
l, m)
else:
hp0, hx0 = hp, hx
return hp, hx, hp0, hx0
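# Illustrative sketch, not from the original source; all parameter values
# below are hypothetical:
#
#     rd = BBHRingdown(time=1126259462, phi0=0.0, mass=62.0, spin=0.7,
#                      massloss=0.05, distance=410.0, inclination=30.0)
#     hp, hx, hp0, hx0 = rd._generate(l=2, m=2)
#
# The distance is given in megaparsecs and the inclination in degrees,
# matching the unit conversions applied in _generate above.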
| transientlunatic/minke | minke/sources.py | Python | isc | 55,225 | ["Gaussian"] | b862dd7c1963f27a1da43a7c3a95d2f6bc2d0b56c118c50ffe0cc2b529d0afcf |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bowtie(MakefilePackage):
"""Bowtie is an ultrafast, memory-efficient short read aligner
for short DNA sequences (reads) from next-gen sequencers."""
homepage = "https://sourceforge.net/projects/bowtie-bio/"
url = "https://github.com/BenLangmead/bowtie/archive/v1.2.0.tar.gz"
version('1.3.0', sha256='d7c2d982a67884909f284a0ff150b56b20127cd7a1ced461c3c4c03e6a6374c5')
version('1.2.3', sha256='86402114caeacbb3a3030509cb59f0b7e96361c7b3ee2dd50e2cd68200898823')
# The bowtie project git tagged and GitHub released a v1.2.2,
# discovered/fixed a bug, git tagged a v1.2.2_p1 and moved the
# 1.2.2 release to use it rather than making a new `1.2.2_p1`
# release.
#
# We point both of the Spack versions at the same tarball so they
# build the binaries that are on the release page as v1.2.2
version('1.2.2_p1', sha256='e1b02b2e77a0d44a3dd411209fa1f44f0c4ee304ef5cc83f098275085740d5a1')
version('1.2.2', sha256='e1b02b2e77a0d44a3dd411209fa1f44f0c4ee304ef5cc83f098275085740d5a1', url="https://github.com/BenLangmead/bowtie/archive/v1.2.2_p1.tar.gz")
version('1.2.1.1', sha256='1b38408b88f61d18d7ff28b2470a8cfeefccb3fc59fd46e4cc62e23874e52c20')
version('1.2.1', sha256='b2a7c8c879cb08f00a82665bee43e1d4861de44a87912c54d168e44c90869728')
version('1.2.0', sha256='dc4e7951b8eca56ce7714c47fd4e84f72badd5312ee9546c912af1963570f894')
# Keeping the old 1.2 version around for reproducibility, it's not
# clearly identical to 1.2.0.
version('1.2', sha256='b1052de4253007890f6436e6361d40148bc2a5a9dd01827bb9f34097747e65f8', url='https://downloads.sourceforge.net/project/bowtie-bio/bowtie/1.2.0/bowtie-1.2-source.zip')
# 1.2.2 and 1.2.2_p1 fail to build with %gcc@8.3.0
# with and without issue-87 patch
conflicts('%gcc@8:', when='@1.2.2')
conflicts('%gcc@8:', when='@1.2.2_p1')
variant('tbb', default=False, description='Use Intel Threading Building Blocks')
depends_on('tbb', when='+tbb')
depends_on('zlib')
# See: https://github.com/BenLangmead/bowtie/issues/87, a
# different fix is in the FreeBSD ports/package tree
# https://svnweb.freebsd.org/ports?view=revision&revision=483954
patch('issue-87.patch', when='@:1.2.2 %gcc@8.0.0:')
# correspond to 'aarch64' architecture
# reference: https://github.com/BenLangmead/bowtie/pull/13
patch('for_aarch64.patch', when='target=aarch64:')
# measures for narrowing error
patch('fix_narrowing_err.patch', when='@:1.2.3')
patch('fix_narrowing_err_1.3.0.patch', when='@1.3.0:')
def edit(self, spec, prefix):
makefile = FileFilter('Makefile')
makefile.filter('CC = .*', 'CC = ' + env['CC'])
makefile.filter('CPP = .*', 'CPP = ' + env['CXX'])
def build(self, spec, prefix):
if '+tbb' in spec:
make()
else:
make('NO_TBB=1')
def install(self, spec, prefix):
make('prefix={0}'.format(self.prefix), 'install')
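# Typical usage from the Spack command line (illustrative):
#
#     spack install bowtie             # default build, without TBB
#     spack install bowtie@1.3.0 +tbb  # pinned version, built against TBB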
| rspavel/spack | var/spack/repos/builtin/packages/bowtie/package.py | Python | lgpl-2.1 | 3,205 | ["Bowtie"] | 24a3f9bbf7ddc0e2bf0a82607d40d2e32092fe5fbbcded8c5f4562b66e313944 |
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy import random
from numpy.compat import asbytes
import sys
class TestSeed(TestCase):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
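# For reference: RandomState accepts a single unsigned 32-bit integer or
# an array-like of them, so np.random.RandomState(42) and
# np.random.RandomState([1, 2, 3]) are both valid; negative, float, or
# out-of-range seeds raise, as the two tests above check.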
class TestBinomial(TestCase):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
np.float(1))
class TestSetState(TestCase):
def setUp(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint(TestCase):
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=np.float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use an md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but np.bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(np.bool).name] == res)
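# The byteswap above keeps the hashed bytes in little-endian order on
# big-endian platforms, so the md5 targets are platform independent;
# booleans are a single byte and need no swap.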
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertEqual(sample.dtype, np.dtype(dt))
for dt in (np.bool, np.int, np.long):
lbnd = 0 if dt is np.bool else np.iinfo(dt).min
ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertFalse(hasattr(sample, 'dtype'))
self.assertEqual(type(sample), dt)
class TestRandomDist(TestCase):
# Make sure the random distribution returns the correct value for a
# given seed
def setUp(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_(np.random.choice(6, s, replace=True).shape, s)
assert_(np.random.choice(6, s, replace=False).shape, s)
assert_(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5')
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936 ],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(np.random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
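# Sketch of the three behaviours exercised above (values are arbitrary):
#
#     mean, cov = [0, 0], [[1, 2], [2, 1]]   # cov not positive semi-definite
#     np.random.multivariate_normal(mean, cov)                        # warns
#     np.random.multivariate_normal(mean, cov, check_valid='ignore')  # silent
#     np.random.multivariate_normal(mean, cov, check_valid='raise')   # raises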
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
np.random.uniform(low=fmin, high=fmax / 1e17)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
assert_equal(np.random.weibull(a=0), 0)
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast(TestCase):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setUp(self):
self.seed = 123456789
def setSeed(self):
np.random.seed(self.seed)
# TODO: Include test for randint once it can broadcast
# Can steal the test written in PR #6938
def test_uniform(self):
low = [0]
high = [1]
uniform = np.random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.setSeed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = np.random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.setSeed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.setSeed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = np.random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.setSeed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.setSeed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = np.random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = np.random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = np.random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.setSeed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.setSeed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = np.random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.setSeed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.setSeed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = np.random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.setSeed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = np.random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.setSeed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = np.random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.setSeed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.setSeed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = np.random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.setSeed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = np.random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.setSeed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.setSeed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = np.random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.setSeed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = np.random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = np.random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = np.random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.setSeed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.setSeed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = np.random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.setSeed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.setSeed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = np.random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.setSeed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.setSeed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = np.random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.setSeed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
self.setSeed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = np.random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.setSeed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = np.random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.setSeed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
self.setSeed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = np.random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.setSeed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)
self.setSeed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)
self.setSeed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = np.random.binomial
desired = np.array([1, 1, 1])
self.setSeed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.setSeed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = np.random.negative_binomial
desired = np.array([1, 0, 1])
self.setSeed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.setSeed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = np.random.RandomState().poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = np.random.poisson
desired = np.array([1, 1, 0])
self.setSeed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = np.random.zipf
desired = np.array([2, 2, 1])
self.setSeed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = np.random.geometric
desired = np.array([2, 2, 2])
self.setSeed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = np.random.hypergeometric
desired = np.array([1, 1, 1])
self.setSeed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = np.random.logseries
desired = np.array([1, 1, 1])
self.setSeed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
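# Added illustration (not part of the upstream suite): the broadcasting rule
# exercised by the tests above. A length-1 parameter sequence broadcasts
# against a length-3 one, so the two calls below consume the random stream
# identically and must agree, while a negative shape raises ValueError.
def _broadcast_demo():
np.random.seed(1234567890)
a = np.random.gamma([1] * 3, [2])  # shape sequence broadcast against scale
np.random.seed(1234567890)
b = np.random.gamma([1], [2] * 3)  # scale sequence broadcast against shape
assert_array_almost_equal(a, b, decimal=14)
assert_raises(ValueError, np.random.gamma, [-1] * 3, [2])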
class TestThread(TestCase):
# make sure each state produces the same sequence even in threads
def setUp(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
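# The tests below feed the same seeds through check_function: each worker
# thread gets its own RandomState, so the threaded draws must match the
# serial draws exactly (up to x87 rounding on 32-bit Windows, handled above).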
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(TestCase):
def setUp(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (np.random.exponential, np.random.standard_gamma,
np.random.chisquare, np.random.standard_t,
np.random.pareto, np.random.weibull,
np.random.power, np.random.rayleigh,
np.random.poisson, np.random.zipf,
np.random.geometric, np.random.logseries)
probfuncs = (np.random.geometric, np.random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
self.assertEqual(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
np.random.beta, np.random.gamma,
np.random.f, np.random.noncentral_chisquare,
np.random.vonmises, np.random.laplace,
np.random.gumbel, np.random.logistic,
np.random.lognormal, np.random.wald,
np.random.binomial, np.random.negative_binomial)
probfuncs = (np.random.binomial, np.random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
self.assertEqual(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
self.assertEqual(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
self.assertEqual(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
# itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
# low = np.array([0])
#
# for dt in itype:
# out = func(low, high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low[0], high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low, high[0], dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [np.random.noncentral_f, np.random.triangular,
np.random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
self.assertEqual(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
self.assertEqual(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
self.assertEqual(out.shape, self.tgtShape)
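# Added note: the shape checks above capture the Issue #4263 contract, e.g.
# np.random.exponential(np.array([2.])) returns a shape-(1,) array rather
# than unwrapping the single-element input to a scalar.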
if __name__ == "__main__":
run_module_suite()
|
ContinuumIO/numpy
|
numpy/random/tests/test_random.py
|
Python
|
bsd-3-clause
| 62,019
|
[
"Gaussian"
] |
9e34ba764c3dacc78e405c8092e84c4006fcc2e5d9f93c19a6b410b7833606b6
|
###############################################################################
# NARR_to_text.py
# email: ritvik@umd.edu, 24th March, 2015
#
# Convert downloaded data to text
###############################################################################
import constants, util, logging, os, subprocess, pdb, multiprocessing, sys
import numpy as np
###############################################################################
# process_NARR_to_text
# Convert NARR netcdf file into a flat text file and create EPIC daily
# weather station list file
###############################################################################
def process_NARR_to_text(cur_var):
# -s select format for output
# -C controls which variable is extracted
# -H prints data to screen
# -d controls hyperslap
# -v specify name of variable
ncks_get = 'ncks -s \'%f\' -C -H -d '
ncks_sub = 'ncks -d '
# _U unpacking
# -o force overwriting of existing file
ncpdq_fn = 'ncpdq -U -O '
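# Example of an assembled extraction command (illustrative variable/indices):
#   ncks -s '%f' -C -H -d x,3,3 -d y,7,7 -v air air.1979.nc
# which prints the full time series of 'air' at grid cell (x=3, y=7).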
var_name = constants.vars_to_get.get(cur_var)
# Extract Lon and Lat boundaries
logging.info('Extracting boundary by lon and lat')
latli,latui,lonli,lonui = util.get_boundary_region(constants.narr_dir+os.sep+cur_var+os.sep+cur_var+'.'+str(constants.START_YR)+'.nc')
for year in range(constants.START_YR,constants.END_YR+1):
inp_nc = constants.narr_dir+os.sep+cur_var+os.sep+cur_var+'.'+str(year)+'.nc' # Input netcdf
unp_nc = constants.data_dir+os.sep+'Data'+os.sep+cur_var+os.sep+cur_var+'.'+str(year)+'.nc' # Unpacked netcdf
# If the netcdf file is missing, bail out
if not(os.path.isfile(inp_nc)):
logging.info(inp_nc+' not present. Exiting!')
sys.exit(1)
# Create output directory by netcdf variable and by year
util.make_dir_if_missing(constants.data_dir+os.sep+'Data'+os.sep+cur_var+os.sep+str(year))
if not(os.path.isfile(unp_nc)):
# Subset netcdf file by lat and lon boundaries
logging.info('Subsetting '+var_name+' by lon and lat')
subst_str = subprocess.check_output(ncks_sub+'x,'+str(lonli)+','+str(lonui)+' -d y,'+str(latli)+','+str(latui)+' '+inp_nc+' '+unp_nc, shell=True) # shell=True since the command is a single string
# Unpack to convert netcdf into a readable format
logging.info('Unpacking '+var_name+' variable for '+str(year))
unpck_str = subprocess.check_output(ncpdq_fn+unp_nc+' '+unp_nc, shell=True)
else:
logging.info('File exists: '+unp_nc)
# Create EPIC weather station list file
wth_fl = constants.out_dir+os.sep+constants.EPIC_DLY
mon_fl = constants.out_dir+os.sep+constants.EPIC_MON
wxr_fl = constants.epic_dly+os.sep+constants.WXPMRUN
if(year==constants.START_YR and (not(os.path.isfile(wth_fl)) or not(os.path.isfile(mon_fl)) or not(os.path.isfile(wxr_fl)))):
logging.info('Creating EPIC weather station list file')
# Create EPIC weather list file
epic_wth = open(wth_fl,'w')
epic_mon = open(mon_fl,'w')
wxrmrun = open(wxr_fl,'w')
# The first year NARR data file is used to extract longitude and latitude
idx = 1
for i in range(0,lonui-lonli):
for j in range(0,latui-latli):
lon_str = subprocess.check_output(ncks_get+'x,'+str(i)+','+str(i)+' -d '+'y,'+str(j)+','+str(j)+' -v lon '+unp_nc, shell=True).strip("\r\n\t '")
lat_str = subprocess.check_output(ncks_get+'x,'+str(i)+','+str(i)+' -d '+'y,'+str(j)+','+str(j)+' -v lat '+unp_nc, shell=True).strip("\r\n\t '")
epic_wth.write('%5s "%10s" %5.3f %5.3f\n' % (idx,'daily//'+str(j)+'_'+str(i)+'.txt',np.float(lat_str),np.float(lon_str)))
epic_mon.write('%5s "%10s" %5.3f %5.3f\n' % (idx,'monthly//'+str(j)+'_'+str(i)+'.txt',np.float(lat_str),np.float(lon_str)))
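# e.g. idx 1 at grid cell (j=0, i=0) becomes: 1 "daily//0_0.txt" 41.532 -103.287
# (coordinates here are made-up illustrative values)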
if i==(lonui-lonli-1) and j==(latui-latli-1):
wxrmrun.write(str(j)+'_'+str(i))
else:
wxrmrun.write(str(j)+'_'+str(i)+'\n')
idx += 1
epic_wth.close()
epic_mon.close()
wxrmrun.close()
# Extract all data from variable for given co-ord (data is a time series)
logging.info('Extracting '+var_name+' variable for '+str(year))
for i in range(0,lonui-lonli):
for j in range(0,latui-latli):
if(not(os.path.isfile(constants.data_dir+os.sep+'Data'+os.sep+cur_var+os.sep+str(year)+os.sep+str(j)+'_'+str(i)+'.txt'))):
exec_str = ncks_get+\
'x,'+str(i)+','+str(i)+' -d '+'y,'+str(j)+','+str(j)+\
' -v '+var_name+' '+unp_nc+\
' > ' + constants.data_dir+os.sep+'Data'+os.sep+cur_var+os.sep+str(year)+os.sep+str(j)+'_'+str(i)+'.txt'
os.system(exec_str)
else:
logging.info('File exists: '+constants.data_dir+os.sep+'Data'+os.sep+cur_var+os.sep+str(year)+os.sep+str(j)+'_'+str(i)+'.txt')
###############################################################################
# parallelize_NARR_to_text
# Iterate/Parallelize NARR netcdf conversion to text
#
###############################################################################
def parallelize_NARR_to_text():
pkg_num = 0
threads = []
total_runs = len(constants.vars_to_get) # Find total number of runs based on number of NARR elements to extract
if(constants.DO_PARALLEL):
pool = multiprocessing.Pool(constants.max_threads)
pool.map(process_NARR_to_text, constants.vars_to_get.keys())
pool.close()
pool.join()
else:
for pkg_num,c_var in enumerate(constants.vars_to_get):
process_NARR_to_text(c_var)
logging.info('Done NARR_to_text!')
if __name__ == '__main__':
parallelize_NARR_to_text()
|
ritviksahajpal/EPIC
|
NARR/NARR_to_text.py
|
Python
|
mit
| 6,044
|
[
"NetCDF"
] |
81df3b3de1c0cf0a21fc201b58ae21cc89f1e106ddbe8ad9d65348e6f3c60785
|
###############################################################################
# create_EPIC_weather_files.py
# email: ritvik@umd.edu, 24th March, 2015
#
# 1. Download NARR data based on user specified region/site, variables and years
# 2. Convert downloaded data to text and then EPIC compatible weather files
###############################################################################
import wget_NARR,NARR_to_text,NARR_to_EPIC,create_EPIC_monthly,constants,util,pdb,logging
if __name__ == '__main__':
# Store constants in log file
util.log_constants()
# Download NARR files
wget_NARR.parallelize_download_NARR()
# Convert NARR netcdf file into text files
NARR_to_text.parallelize_NARR_to_text()
# Convert text files into EPIC files
NARR_to_EPIC.parallelize_NARR_to_EPIC()
# Create EPIC monthly files
create_EPIC_monthly.create_monthly()
|
ritviksahajpal/EPIC
|
NARR/create_EPIC_weather_files.py
|
Python
|
mit
| 891
|
[
"NetCDF"
] |
27a620f8862acab0aa10ae93763647337cc585cf523e526a5b6b6c9981bbbb76
|
import re, os, sys
from optparse import OptionParser
from numpy import *
import subprocess
from communities import *
class ResCluster :
members = []
selection_text = None #selection text for pymol
number = 0 #cluster number
intra_cluster_variance = 0
extra_cluster_variance = 0
indexlist = []
indexlist_complement = []
def __init__(self,number,firstcluster):
self.number = number
self.members = []
self.firstcluster = firstcluster
def __str__(self):
if(self.selection_text == None):
self.selection_text = ""
firstone = self.firstcluster
for member in self.members:
if firstone !=1:
self.selection_text +=","
firstone = 0
self.selection_text += "(resi "+str(member.number)+" "
if member.tag == "":
self.selection_text += " and (name N,H,CA,C,O) "
if member.tag == "S":
self.selection_text += " and (not name N,H,CA,C,O) "
if member.chain != None:
self.selection_text += " and (chain "+str(member.chain)+" ) "
self.selection_text += " )"
return "sele cluster"+str(self.number)+", "+"( "+self.selection_text+")\n color "+str(int(self.number+1))+", cluster"+str(self.number)+"\n"
def read_res_matrix(myfilename): #from dihedral_mutent.py
rownames = []
colnames = []
myfile = open(myfilename,'r')
inlines = myfile.readlines()
myfile.close()
res = inlines[0].split()
mymatrix = zeros((int(len(inlines[1:])), int(len(res))),float64)
#print mymatrix.shape
for myname_num in res:
colnames.append(myname_num)
#print colnames
#print len(colnames)
for row_num in range(int(len(inlines[1:]))):
thisline = inlines[row_num + 1]
thislinedata = thisline.split()
thisname = thislinedata[0]
res_num = int(floor(row_num))
thislinenums = map(float, thislinedata[1:]) #does this need to be float64 or another double precision thingy?
#print thislinenums
thislinearray = array(thislinenums,float64)
#print thislinearray.shape
rownames.append(thisname)
for col_num in range(len(colnames)):
#print "name: "+str(thisname)+" chi: "+str(row_chi)+ " row_num: "+str(row_num)+" row_chi: "+str(row_chi)+ " col_num: "+str(col_num)+" col_chi: "+str(col_chi)+"\n"
mymatrix[res_num,col_num] = float64(thislinearray[col_num])
#print rownames
return mymatrix, rownames, colnames
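# Illustrative layout read_res_matrix() expects (hypothetical names/values):
#   ALA1 GLY2 SER3S
#   ALA1 0.00 0.12 0.03
#   GLY2 0.12 0.00 0.08
#   SER3S 0.03 0.08 0.00
# The first line holds column names; each following row starts with its row name.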
# output the elements of a matrix in string formatting, optionally zeroing the diagonal terms
def output_matrix(myfilename,mymatrix,rownames,colnames, zero_diag=False):
myfile = open(myfilename,'w')
for col_num, col_name in zip(range(len(colnames)), colnames):
myfile.write(str(col_name) + " ")
myfile.write("\n")
for row_num, row_name in zip(range(len(rownames)), rownames):
myfile.write(str(row_name) + " ")
for col_num, col_name in zip(range(len(colnames)), colnames):
if col_num == row_num and zero_diag:
myfile.write(str(0))
else:
#print row_num, col_num, mymatrix[row_num,col_num]
myfile.write(str(mymatrix[row_num,col_num]))
myfile.write(" ")
myfile.write("\n")
myfile.close()
class Res:
name = "ALA"
number = 0
clusternum = -1 #not assigned to a cluster
tag = "" #default to mainchain
chain = None #default to no chain
def __init__(self,name,number,tag,clusternum,chain=None):
self.name = name
self.number = number
self.tag = tag
self.clusternum = clusternum
self.chain = chain
def __str__(self):
return str(self.name)+" "+str(self.number)+" "+str(self.tag)+" "+str(self.clusternum)+" "+str(self.chain)
def matrix_header(self):
if(self.chain == None):
return str(self.name)+str(self.number)+str(self.tag)
else:
return str(self.name)+str(self.number)+str(self.tag)+str(self.chain)
def __eq__(self, other):
return ( self.name == other.name and self.number == other.number and self.clusternum == other.clusternum and self.chain == other.chain and self.tag == other.tag )
#############################################################################################################################################################################
def do_cg_edgeweights_from_mutinf_matrix(options):
try:
Rfile = open(options.Rfile,'w')
except:
print "cannot open R command file to be created "+str(options.Rfile)
sys.exit(1)
if options.mutinf != None:
mutinf_matrix, rownames, colnames = read_res_matrix(options.mutinf)
if options.dist_variance != None:
dist_variance_matrix, rownames2, colnames2 = read_res_matrix(options.dist_variance)
print "shape of dist variance matrix: "+str(shape(dist_variance_matrix))
print "len rownames: "+str(len(rownames))
print "len colnames: "+str(len(colnames))
print "mutinf matrix shape: "+str(mutinf_matrix.shape)
if options.contacts != None:
try:
contacts, rownames_contacts, colnames_contacts = read_res_matrix(options.contacts)
except:
print "cannot open contacts file: "+str(options.contacts)
print contacts
#########################################################################
## Read input file if a matrix is not provided, or cluster file that was just made by R if a matrix was provided ##
sorted_communities = options.filename.replace('.txt','') # str.replace returns a new string; assign it
sorted_communities += "_sorted.txt"
p = subprocess.Popen("cp -p "+str(options.filename)+" "+str(options.filename)+"_bak.txt"+" ; sort -n -k 3 -k 1 "+str(options.filename)+" > "+str(sorted_communities), shell=True)
os.waitpid(p.pid, 0)
try:
fil1 = open(options.filename,'r')
except:
print "cannot open input file "+str(options.filename)
try:
fil2 = open(sorted_communities,'r')
except:
print "cannot open input file "+str(sorted_communities)
sys.exit(1)
inlines1 = fil1.readlines()
fil1.close()
inlines2 = fil2.readlines()
fil2.close()
maxclusternum = -1 #maximum cluster num
reslist = [] #list of residues
reslist_sorted = []
#read communities_sorted.txt file
#myline = inlines[-1]
#matchline=re.compile(r'([0-9]*)\s*([A-Z][A-Z,0-9][A-Z,0-9])([0-9]+)([S]*)\s+([0-9]+)')
#matches = matchline.match(myline)
#if matches.group(2) != None:
# name = matches.group(2)
# if matches.group(3) != None:
# number = int(matches.group(3))
# if matches.group(4) == 'S':
# tag = matches.group(4)
# else:
# tag = ''
# if matches.group(5) != None:
# clusternum = int(matches.group(5))
# last_community = clusternum
community_list = [] #
templist = []
communities_lengths = []
for line in inlines1[0:]:
matchline=re.compile(r'([0-9]*)\s*([A-Z][A-Z,0-9][A-Z,0-9])([0-9]+)([S]*)\s+([0-9]+)')
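# matches lines like "12 ALA34S 3": leading index, residue name, residue
# number, optional sidechain tag 'S', and community (cluster) number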
#print line
matches = matchline.match(line)
#print [matches.group(i) for i in range(5)]
if matches.group(2) != None:
name = matches.group(2)
if matches.group(3) != None:
number = int(matches.group(3))
if matches.group(4) == 'S':
tag = matches.group(4)
else:
tag = ''
if matches.group(5) != None:
clusternum = int(matches.group(5))
newres = Res(name,number,tag,clusternum) #offset residue number by option
reslist.append(newres)
if(clusternum > maxclusternum):
maxclusternum = clusternum
this_community = 0
for line in inlines2[0:]:
matchline=re.compile(r'([0-9]*)\s*([A-Z][A-Z,0-9][A-Z,0-9])([0-9]+)([S]*)\s+([0-9]+)')
#print line
matches = matchline.match(line)
#print [matches.group(i) for i in range(5)]
if matches.group(2) != None:
name = matches.group(2)
if matches.group(3) != None:
number = int(matches.group(3))
if matches.group(4) == 'S':
tag = matches.group(4)
else:
tag = ''
if matches.group(5) != None:
clusternum = int(matches.group(5))
if clusternum > this_community:
communities_lengths.append(len(templist))
community_list.append(templist)
templist = [] #start building up next community
this_community = clusternum
print "community boundary: "+str(line)
newres = Res(name,number,tag,clusternum) #offset residue number by option
reslist_sorted.append(newres)
templist.append(newres)
if(clusternum > maxclusternum):
maxclusternum = clusternum
#last iteration
communities_lengths.append(len(templist))
community_list.append(templist)
this_community = clusternum
print "community boundary: "+str(line)
# here's where we build up the new mutual information matrix
print "max community number: "+str(maxclusternum)
print "length of reslist: "+str(len(reslist))
print "communities lengths:"+str(communities_lengths)
communities_sorted_mutinf = mutinf_matrix.copy()
rownames_new = []
colnames_new = []
mutinf_contacts_filtered = zeros((len(reslist), len(reslist)), float64)
mutinf_between_communities = zeros((maxclusternum + 1, maxclusternum + 1), float64)
mutinf_between_communities_contacts_filtered = zeros((maxclusternum + 1, maxclusternum + 1), float64)
communities_sorted_mutinf_blocks = zeros((len(reslist), len(reslist)), float64)
communities_sorted_mutinf_contacts_filtered = zeros((len(reslist), len(reslist)), float64)
communities_sorted_mutinf_contacts_filtered_blocks = zeros((len(reslist), len(reslist)), float64)
avg_distance_variances_within_and_between_communities = zeros((maxclusternum + 1, maxclusternum + 1), float64)
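# Matrix roles (added commentary): communities_sorted_* are residue-by-residue
# matrices reordered so members of a community are contiguous; *_blocks repeat
# each community-level value over its residue block; *_contacts_filtered are
# elementwise products with the contact map.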
for i, myres1 in zip(range(len(reslist_sorted)),reslist_sorted):
for ik, ikres1 in zip(range(len(reslist)), reslist):
if myres1 == ikres1: #another way of getting index
myindex1 = ik
rownames_new.append(rownames[myindex1] )
colnames_new.append(rownames[myindex1] )
for j, myres2 in zip(range(len(reslist_sorted)),reslist_sorted):
myindex2 = reslist.index(myres2)
for jk, jkres2 in zip(range(len(reslist)), reslist):
if myres2 == jkres2:
myindex2 = jk
communities_sorted_mutinf[i,j] = mutinf_matrix[myindex1,myindex2] #reorders matrix
if options.contacts != None:
communities_sorted_mutinf_contacts_filtered[i,j] = contacts[myindex1,myindex2] * mutinf_matrix[myindex1,myindex2]
mutinf_between_communities_contacts_filtered[myres1.clusternum, myres2.clusternum] += contacts[myindex1,myindex2] * mutinf_matrix[myindex1,myindex2] #/ (1.0 * len(community_list[i]) * len(community_list[j]))
mutinf_contacts_filtered[i,j] = contacts[i,j] * mutinf_matrix[i,j]
if options.dist_variance != None:
#print "dist matrix element: "+str( dist_variance_matrix[myindex1,myindex2])
#print "clusternum1 :"+str(myres1.clusternum)
#print "clusternum2 :"+str(myres2.clusternum)
avg_distance_variances_within_and_between_communities[myres1.clusternum, myres2.clusternum] += dist_variance_matrix[myindex1,myindex2] / (1.0 * communities_lengths[myres1.clusternum] * communities_lengths[myres2.clusternum])
mutinf_between_communities[myres1.clusternum, myres2.clusternum] += mutinf_matrix[myindex1,myindex2] / (1.0 * communities_lengths[myres1.clusternum] * communities_lengths[myres2.clusternum]) #average mutinf btw communities
for i, myres1 in zip(range(len(reslist_sorted)),reslist_sorted):
myindex1 = reslist.index(myres1)
for j, myres2 in zip(range(len(reslist_sorted)),reslist_sorted):
communities_sorted_mutinf_blocks[i,j] = mutinf_between_communities[myres1.clusternum, myres2.clusternum]
if options.contacts != None:
communities_sorted_mutinf_contacts_filtered_blocks[i,j] = mutinf_between_communities_contacts_filtered[myres1.clusternum, myres2.clusternum]
output_matrix(options.prefix+options.outfile,communities_sorted_mutinf,rownames_new,colnames_new)
rownames_communities = []
for i in range(maxclusternum + 1):
rownames_communities.append("C"+str(i+1))
colnames_communities = rownames_communities
output_matrix(str(options.prefix)+options.communities_mutinf, mutinf_between_communities, rownames_communities, colnames_communities)
output_matrix(str(options.prefix)+options.communities_mutinf+"_avg_distance_variances.txt", avg_distance_variances_within_and_between_communities , rownames_communities, colnames_communities)
output_matrix(str(options.prefix)+options.communities_mutinf+"_blocks.txt", communities_sorted_mutinf_blocks, rownames_new, colnames_new)
output_matrix(str(options.prefix)+"mutinf_unclustered_contacts_filtered.txt", mutinf_contacts_filtered , rownames, colnames)
### NEED TO ADD ABOVE TO R SCRIPT, and HOOKS TO SPECIFY DISTANCE VARIANCE MATRICES, this will tell us if communities are semi-rigid elements ###
output_matrix(str(options.prefix)+options.communities_mutinf+"_contacts_filtered.txt", communities_sorted_mutinf_contacts_filtered, rownames_new, colnames_new)
output_matrix(str(options.prefix)+options.communities_mutinf+"_contacts_filtered_blocks.txt", communities_sorted_mutinf_contacts_filtered_blocks, rownames_new, colnames_new)
num_residues = len(reslist)
R_code1 = """
library(marray)
library(fields)
library(cluster)
"""
Rfile.write(R_code1)
#Rfile.write("prec <- read.table(\""+str(options.matrix)+"\")\n")
#Rfile.write("precdist <- read.table(\""+str(distfile)+"\")\n")
Rfile.write("mutinf_unclustered <- read.table(\""+str(options.mutinf)+"\")\n")
Rfile.write("new_mutinf <- read.table(\""+str(options.prefix)+str(options.outfile)+"\")\n")
Rfile.write("mutinf_unclustered_contacts_filtered <- read.table(\""+str(options.prefix)+"mutinf_unclustered_contacts_filtered.txt"+"\")\n")
Rfile.write("communities_mutinf <- read.table(\""+str(options.prefix)+str(options.communities_mutinf)+"\")\n")
Rfile.write("communities_mutinf_blocks <- read.table(\""+str(options.prefix)+str(options.communities_mutinf+"_blocks.txt")+"\")\n")
Rfile.write("communities_mutinf_contacts_filtered <- read.table(\""+str(options.prefix)+str(options.communities_mutinf+"_contacts_filtered.txt")+"\")\n")
Rfile.write("communities_mutinf_contacts_filtered_blocks <- read.table(\""+str(options.prefix)+str(options.communities_mutinf+"_contacts_filtered_blocks.txt")+"\")\n")
Rfile.write("pdf(\""+str(options.prefix)+"mutinf_communities_reordered.pdf""\", width=96,height=96)\n")
mydict = {'mutinf_unclustered': options.prefix+"mutinf_unclustered.pdf" ,'mutinf_unclustered_contacts_filtered' : str(options.prefix)+"mutinf_unclustered_contacts_filtered.pdf", 'mutinf_between_communities': options.prefix+"mutinf_between_communities.pdf", 'mutinf_between_communities_contacts_filtered_blocks': options.prefix+"mutinf_between_communities_contacts_filtered_blocks.pdf", 'mutinf_between_communities_blocks': options.prefix+"mutinf_between_communities_blocks.pdf",'mutinf_between_communities_contacts_filtered': options.prefix+"mutinf_between_communities_contacts_filtered.pdf", 'colorbar_mutinf': options.prefix+"colorbar_mutinf.pdf", 'colorbar_mutinf_between_communities': options.prefix+"colorbar_mutinf_between_communities.pdf"}
R_code2 = """
library(marray)
#heatmap(as.matrix(new_mutinf), col=maPalette(low="white",mid="blue",high="red", k=50), add = FALSE, xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", Rowv = NA, Colv = NA, cexRow=0.8, cexCol=0.8, oldstyle=FALSE,symm = TRUE)
heatmap(as.matrix(new_mutinf), col=maPalette(low="white",mid="gray",high="red", k=50), add = FALSE, xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", Rowv = NA, Colv = NA, cexRow=0.8, cexCol=0.8, oldstyle=FALSE,symm = TRUE)
pdf("%(mutinf_unclustered)s", width=96, height=96)
heatmap(as.matrix(mutinf_unclustered), col=maPalette(low="white",mid="gray",high="red", k=50), add = FALSE, xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", Rowv = NA, Colv = NA, cexRow=0.8, cexCol=0.8, oldstyle=FALSE,symm = TRUE)
pdf("%(mutinf_unclustered_contacts_filtered)s", width=96, height=96)
heatmap(as.matrix(mutinf_unclustered_contacts_filtered), col=maPalette(low="white",mid="gray",high="red", k=50), add = FALSE, xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", Rowv = NA, Colv = NA, cexRow=0.8, cexCol=0.8, oldstyle=FALSE,symm = TRUE)
#image(as.matrix(new_mutinf),col = maPalette(low = "white", high = "black", k =50), add = FALSE, xaxt= "n", yaxt = "n",xlab="Residue i",ylab="Residue j")
#axis(2,labels=c(0,20,40,60,80,100,120,132),at=c(1-0,1-20/132,1-40/132,1-60/132,1-80/132,1-100/132,1-120/132,1-132/132))
#axis(1,labels=c(0,20,40,60,80,100,120,132),at=c(0,20/132,40/132,60/132,80/132,100/132,120/132,132/132))
#rect(72/132,1-46/132,86/132,1-29/132,border="red")
#rect(29/132,1-46/132,46/132,1-29/132,border="red")
#rect(72/132,1-86/132,86/132,1-72/132,border="red")
#rect(72/132,1-6/132,86/132,1-4/132,border="blue")
dev.off()
pdf("%(mutinf_between_communities)s")
#heatmap(as.matrix(communities_mutinf), col=maPalette(low="white",mid="blue",high="red"), add = FALSE, xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", Rowv = NA, Colv = NA, oldstyle=FALSE,symm = TRUE)
heatmap(as.matrix(communities_mutinf), col=maPalette(low="white",mid="gray",high="red"), add = FALSE, xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", Rowv = NA, Colv = NA, oldstyle=FALSE,symm = TRUE)
pdf("%(mutinf_between_communities_contacts_filtered_blocks)s",width=96,height=96)
#heatmap(as.matrix(communities_mutinf_contacts_filtered_blocks), col=maPalette(low="white",mid="blue",high="red"), add = FALSE, xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", Rowv = NA, Colv = NA, oldstyle=FALSE,symm = TRUE)
heatmap(as.matrix(communities_mutinf_contacts_filtered_blocks), col=maPalette(low="white",mid="gray",high="red"), add = FALSE, xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", Rowv = NA, Colv = NA, oldstyle=FALSE,symm = TRUE)
pdf("%(mutinf_between_communities_blocks)s", width=96,height=96)
#heatmap(as.matrix(communities_mutinf_blocks), col=maPalette(low="white",mid="blue",high="red"), add = FALSE, xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", Rowv = NA, Colv = NA, cexRow=0.8, cexCol=0.8, oldstyle=FALSE,symm = TRUE)
heatmap(as.matrix(communities_mutinf_blocks), col=maPalette(low="white",mid="gray",high="red"), add = FALSE, xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", Rowv = NA, Colv = NA, cexRow=0.8, cexCol=0.8, oldstyle=FALSE,symm = TRUE)
pdf("%(mutinf_between_communities_contacts_filtered)s", width=96,height=96)
#heatmap(as.matrix(communities_mutinf_contacts_filtered), col=maPalette(low="white",mid="blue",high="red"), add = FALSE, xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", Rowv = NA, Colv = NA, cexRow=0.8, cexCol=0.8, oldstyle=FALSE,symm = TRUE)
heatmap(as.matrix(communities_mutinf_contacts_filtered), col=maPalette(low="white",mid="gray",high="red"), add = FALSE, xaxs = "i", yaxs = "i", xaxt = "n", yaxt = "n", Rowv = NA, Colv = NA, cexRow=0.8, cexCol=0.8, oldstyle=FALSE,symm = TRUE)
pdf("%(colorbar_mutinf)s",width=6,height=2)
#maColorBar(seq(min(new_mutinf),max(new_mutinf),by=(max(new_mutinf)-min(new_mutinf))/50), horizontal=TRUE, col = maPalette(low = "white", mid="blue", high = "red", k =50))
maColorBar(seq(min(new_mutinf),max(new_mutinf),by=(max(new_mutinf)-min(new_mutinf))/50), horizontal=TRUE, col = maPalette(low = "white", mid = "gray", high = "red", k =50))
pdf("%(colorbar_mutinf_between_communities)s",width=6,height=2)
#maColorBar(seq(min(communities_mutinf),max(communities_mutinf),by=(max(communities_mutinf)-min(communities_mutinf))/50), horizontal=TRUE, col = maPalette(low = "white", mid="blue", high = "red", k =50))
maColorBar(seq(min(communities_mutinf),max(communities_mutinf),by=(max(communities_mutinf)-min(communities_mutinf))/50), horizontal=TRUE, col = maPalette(low = "white", mid="gray", high="red", k =50))
#heatmap(x=as.matrix((S1M47_mutent)),zlim=c(0,max(S1M47_mutent)*1.0), col = maPalette(low = "white", mid = "blue", high = "red", k =50), add = FALSE, xaxs = "i", yaxs = "i",xaxt= "n", yaxt = "n", reorderfun = #function(d,w) rev(reorder(d,w)),revC=TRUE, cexRow=0.8, cexCol=0.8, oldstyle = FALSE,symm=TRUE )
#dev.off()
""" % mydict
Rfile.write(R_code2)
#Rfile.write("write.table(fit$cluster, \""+str(options.filename)+"\", quote=FALSE, sep='\t')")
Rfile.write("\n")
Rfile.write("\n")
Rfile.close()
### Run R to calculate clusters using multidimensional scaling #######
print "Running R on "+str(options.Rfile)
p = subprocess.Popen("cat "+str(options.Rfile)+" | R --no-save", shell=True)
os.waitpid(p.pid, 0)
return mutinf_between_communities
#return mutinf between communities filtered by contacts for further analysis, if desired
#return mutinf_between_communities_contacts_filtered
def cg_edgeweights_to_matrix_for_dendro_default_options():
class run_options:
filename = "communities.txt"
outfile = "mutinf_communities_reordered.txt"
begin = 0
Rfile = "heatmap_communities.txt"
mutinf = None
communities_mutinf = "mutinf_bewteen_communities.txt"
mutinf_output = "mutinf_unclustered.txt"
contacts = None
prefix = ""
dist_variance=None
my_options = run_options()
return my_options
if __name__ == "__main__":
usage="%prog -f communities.txt -t mypdb_mutinf_res_sum_0diag.txt "
parser=OptionParser(usage)
### options for k-means and multidimensional scaling
#parser.add_option("-s", "--structure", default=None, type="string", help="pdb file")
parser.add_option("-f", "--filename", default="communities.txt", type="string", help="space-delimeted text file with three columns: number residue name/number/tag cluster_number")
parser.add_option("-o", "--outfile", default="mutinf_communities_reordered.txt", type="string", help="filename for output mutual information matrix ordered by communities")
parser.add_option("-b", "--begin", default=0, type=int, help="first residue offset")
parser.add_option("-r", "--Rfile", default="heatmap_communities.txt",type="string", help="R commands file to be created")
parser.add_option("-t", "--mutinf", default=None, type="string", help="mutual information matrix filename")
parser.add_option("-m", "--communities_mutinf", default="mutinf_bewteen_communities.txt", type="string", help="mutual information between communities matrix filename")
parser.add_option("-c", "--contacts", default=None, type="string", help="matrix for contacts")
parser.add_option("-p", "--prefix", default="",type="string", help="prefix for output")
parser.add_option("-v", "--dist_variance", default=None, type="string", help="filename for distance variances")
## Generate cluster list using multidimensional scaling and kmeans in R ##
(options,args)=parser.parse_args()
print "options"
print options
#distfile = options.matrix
#distfile = distfile.replace('dist_variance', 'dist')
mutinf_between_communities = do_cg_edgeweights_from_mutinf_matrix(options)
|
chris-lee-mc/MutInf
|
cg_edgeweights_to_matrix_for_dendro.py
|
Python
|
gpl-3.0
| 24,900
|
[
"PyMOL"
] |
9d1a268dea9876b252e4dcb04b8f2761abbd248ba74f8068971942a4fcf7aed6
|
#/**********************************************************************
#** This program is part of 'MOOSE', the
#** Messaging Object Oriented Simulation Environment.
#** Copyright (C) 2003-2014 Upinder S. Bhalla. and NCBS
#** It is made available under the terms of the
#** GNU Lesser General Public License version 2.1
#** See the file COPYING.LIB for the full notice.
#**********************************************************************/
'''
This LIF network with Ca plasticity is based on:
David Higgins, Michael Graupner, Nicolas Brunel
Memory Maintenance in Synapses with Calcium-Based
Plasticity in the Presence of Background Activity
PLOS Computational Biology, 2014.
Author: Aditya Gilra, NCBS, Bangalore, October, 2014.
'''
## import modules and functions to be used
import numpy as np
import matplotlib.pyplot as plt
import random
import time
import moose
np.random.seed(100) # set seed for reproducibility of simulations
random.seed(100) # set seed for reproducibility of simulations
moose.seed(100) # set seed for reproducibility of simulations
#############################################
# All parameters as per:
# David Higgins, Michael Graupner, Nicolas Brunel
# Memory Maintenance in Synapses with Calcium-Based
# Plasticity in the Presence of Background Activity
# PLOS Computational Biology, 2014.
#############################################
#############################################
# Neuron model
#############################################
# equation: dv/dt = (1/taum)*(-(v-el)) + inp
# with spike when v>vt, reset to vr
el = -70e-3 #V # Resting potential
vt = -50e-3 #V # Spiking threshold
Rm = 20e6 #Ohm # Only taum is needed, but LIF neuron accepts
Cm = 1e-9 #F # Rm and Cm and constructs taum=Rm*Cm
taum = Rm*Cm #s # Membrane time constant is 20 ms
vr = -60e-3 #V # Reset potential
Iinject = 11.5e-3/Rm # constant current injection into LIF neuron
# same as setting el=-70+15=-55 mV and inp=0
noiseInj = True # inject noisy current into each cell: boolean
noiseInjSD = 5e-3/Rm #A # SD of noise added to 'current'
# SD*sqrt(taum) is used as noise current SD
#############################################
# Network parameters: numbers
#############################################
red_fact = 10 # reduction factor for N,C,J
N = 10000//red_fact # Total number of neurons (integer division, so N stays an int under Python 3)
fexc = 0.8 # Fraction of exc neurons
NE = int(fexc*N) # Number of excitatory cells
NI = N-NE # Number of inhibitory cells
#############################################
# Simulation parameters
#############################################
simtime = 1200.0 #s # Simulation time
dt = 1e-3 #s # time step
plotDt = 1.0 #s # Time step for storing output.
#############################################
# Network parameters: synapses (not for ExcInhNetBase)
#############################################
## With each presynaptic spike in exc / inh neuron,
## J / -g*J is added to post-synaptic Vm -- delta-fn synapse
## Since LIF neuron used below is derived from Compartment class,
## conductance-based synapses (SynChan class) can also be used.
C = 500//red_fact # Number of incoming connections on each neuron (exc or inh); integer division
# 5% conn prob between any two neurons
# Since we reduced N from 10000 to 1000, C = 50 instead of 500
# but we need to increase J by 10 to maintain total input per neuron
fC = fexc # fraction fC incoming connections are exc, rest inhibitory
J = 0.2e-3 #V # exc strength is J (in V as we add to voltage)
# Critical J is ~ 0.45e-3 V in paper for N = 10000, C = 1000
# See what happens for J = 0.2e-3 V versus J = 0.8e-3 V
J *= red_fact # Multiply J by red_fact to compensate C/red_fact.
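# With red_fact = 10: N = 1000, C = 50, J = 2 mV, so the product C*J (total
# synaptic drive per neuron at a given rate) matches the full-size network.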
g = 4.0 # -gJ is the inh strength. For exc-inh balance g >~ f(1-f)=4
syndelay = dt # synaptic delay:
refrT = 0.0 # s # absolute refractory time
#############################################
# Ca Plasticity parameters: synapses (not for ExcInhNetBase)
#############################################
CaPlasticity = True # set it True or False to turn on/off plasticity
tauCa = 22.6936e-3 # s # Ca decay time scale
tauSyn = 346.3615 # s # synaptic plasticity time scale
## in vitro values in Higgins et al 2014, faster plasticity
CaPre = 0.56175 # mM
CaPost = 1.2964 # mM
## in vivo values in Higgins et al 2014, slower plasticity
#CaPre = 0.33705 # mM
#CaPost = 0.74378 # mM
delayD = 4.6098e-3 # s # CaPre is added to Ca after this delay
# proxy for rise-time of NMDA
thetaD = 1.0 # mM # depression threshold for Ca
thetaP = 1.3 # mM # potentiation threshold for Ca
gammaD = 331.909 # factor for depression term
gammaP = 725.085 # factor for potentiation term
eqWeight = 0.16 # initial synaptic weight
# gammaP/(gammaP+gammaD) = eq weight w/o noise
# but see eqn (22), noiseSD also appears
bistable = True # if bistable is True, use bistable potential for weights
noisy = True # use noisy weight updates given by noiseSD
noiseSD = 3.3501 # if noisy, use noiseSD (3.3501 from Higgins et al 2014)
#noiseSD = 0.1 # if bistable==False, use a smaller noise than in Higgins et al 2014
#############################################
# Exc-Inh network base class without connections
#############################################
class ExcInhNetBase:
"""Simulates and plots LIF neurons (exc and inh separate).
Author: Aditya Gilra, NCBS, Bangalore, India, October 2014
"""
def __init__(self,N=N,fexc=fexc,el=el,vt=vt,Rm=Rm,Cm=Cm,vr=vr,\
refrT=refrT,Iinject=Iinject):
""" Constructor of the class """
self.N = N # Total number of neurons
self.fexc = fexc # Fraction of exc neurons
self.NmaxExc = int(fexc*N) # max idx of exc neurons, rest inh
self.el = el # Resting potential
self.vt = vt # Spiking threshold
self.taum = taum # Membrane time constant
self.vr = vr # Reset potential
self.refrT = refrT # Absolute refractory period
self.Rm = Rm # Membrane resistance
self.Cm = Cm # Membrane capacitance
self.Iinject = Iinject # constant input current
self.noiseInjSD = noiseInjSD # SD of injected noise
self.simif = False # whether the simulation is complete
self._setup_network()
def __str__(self):
return "LIF network of %d neurons "\
"having %d exc." % (self.N,self.NmaxExc)
def _setup_network(self):
"""Sets up the network (_init_network is enough)"""
self.network = moose.LIF( 'network', self.N );
moose.le( '/network' )
self.network.vec.Em = self.el
self.network.vec.thresh = self.vt
self.network.vec.refractoryPeriod = self.refrT
self.network.vec.Rm = self.Rm
self.network.vec.vReset = self.vr
self.network.vec.Cm = self.Cm
if not noiseInj:
self.network.vec.inject = self.Iinject
else:
## inject a constant + noisy current
## values are set in self.simulate()
self.noiseTables = moose.StimulusTable('noiseTables',self.N)
moose.connect( self.noiseTables, 'output', \
self.network, 'setInject', 'OneToOne')
def _init_network(self,v0=el):
"""Initialises the network variables before simulation"""
self.network.vec.initVm = v0
def simulate(self,simtime=simtime,dt=dt,plotif=False,**kwargs):
self.dt = dt
self.simtime = simtime
self.T = np.ceil(simtime/dt)
self.trange = np.arange(0,self.simtime,dt)
for i in range(self.N):
if noiseInj:
## Gaussian white noise SD added every dt interval should be
## divided by sqrt(dt), as the later numerical integration
## will multiply it by dt.
## See the Euler-Maruyama method, numerical integration in
## http://www.scholarpedia.org/article/Stochastic_dynamical_systems
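## e.g. with dt = 1 ms and taum = Rm*Cm = 20 ms, the per-step SD below is
## noiseInjSD*sqrt(taum/dt) = noiseInjSD*sqrt(20) ~ 4.5x the nominal SD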
self.noiseTables.vec[i].vector = self.Iinject + \
np.random.normal( \
scale=self.noiseInjSD*np.sqrt(self.Rm*self.Cm/self.dt), \
size=int(self.T)
) # scale = SD
self.noiseTables.vec[i].stepSize = 0 # use current time
# as x value for interpolation
self.noiseTables.vec[i].stopTime = self.simtime
self._init_network(**kwargs)
if plotif:
self._init_plots()
# moose simulation
#moose.useClock( 1, '/network', 'process' )
#moose.setClock( 0, dt )
#moose.setClock( 1, dt )
#moose.setClock( 2, dt )
#moose.setClock( 3, dt )
#moose.setClock( 9, dt )
## Do need to set the dt for MOOSE clocks
for i in range(10):
moose.setClock( i, dt )
moose.setClock( 18, plotDt )
t1 = time.time()
print('reinit MOOSE -- takes a while ~20s.')
moose.reinit()
print(('reinit time t = ', time.time() - t1))
t1 = time.time()
print('starting')
simadvance = self.simtime / 50.0
for i in range( 50 ):
moose.start( simadvance )
print(('at t = ', i * simadvance, 'realtime = ', time.time() - t1))
#moose.start(self.simtime)
print(('runtime for ', self.simtime, 'sec, is t = ', time.time() - t1))
if plotif:
self._plot()
def _init_plots(self):
## make a few tables to store a few Vm-s
numVms = 10
self.plots = moose.Table2( '/plotVms', numVms )
## draw numVms out of N neurons
nrnIdxs = random.sample(list(range(self.N)),numVms)
for i in range( numVms ):
moose.connect( self.network.vec[nrnIdxs[i]], 'VmOut', \
self.plots.vec[i], 'input')
## make self.N tables to store spikes of all neurons
self.spikes = moose.Table2( '/plotSpikes', self.N )
moose.connect( self.network, 'spikeOut', \
self.spikes, 'input', 'OneToOne' )
## make 2 tables to store spikes of all exc and all inh neurons
self.spikesExc = moose.Table2( '/plotSpikesAllExc' )
for i in range(self.NmaxExc):
moose.connect( self.network.vec[i], 'spikeOut', \
self.spikesExc, 'input' )
self.spikesInh = moose.Table2( '/plotSpikesAllInh' )
for i in range(self.NmaxExc,self.N):
moose.connect( self.network.vec[i], 'spikeOut', \
self.spikesInh, 'input' )
def _plot(self):
""" plots the spike raster for the simulated net"""
#############################################
# Exc-Inh network class with Ca plasticity based connections
# (inherits from ExcInhNetBase)
#############################################
class ExcInhNet(ExcInhNetBase):
""" Recurrent network simulation """
def __init__(self,J=J,incC=C,fC=fC,scaleI=g,syndelay=syndelay,**kwargs):
"""Overloads base (parent) class"""
self.J = J # exc connection weight
self.incC = incC # number of incoming connections per neuron
self.fC = fC # fraction of exc incoming connections
self.excC = int(fC*incC)# number of exc incoming connections
self.scaleI = scaleI # inh weight is scaleI*J
self.syndelay = syndelay# synaptic delay
# call the parent class constructor
ExcInhNetBase.__init__(self,**kwargs)
def __str__(self):
return "LIF network of %d neurons "\
"of which %d are exc." % (self.N,self.NmaxExc)
def _init_network(self,**args):
ExcInhNetBase._init_network(self,**args)
def _init_plots(self):
ExcInhNetBase._init_plots(self)
self.recN = 50 # number of neurons for which to record weights and Ca
if CaPlasticity:
## make tables to store weights of recN exc synapses
## for each post-synaptic exc neuron
self.weights = moose.Table2( '/plotWeights', self.excC*self.recN )
for i in range(self.recN): # range(self.N) is too large
for j in range(self.excC):
moose.connect( self.weights.vec[self.excC*i+j], 'requestOut',
self.synsEE.vec[i*self.excC+j].synapse[0], 'getWeight')
self.CaTables = moose.Table2( '/plotCa', self.recN )
for i in range(self.recN): # range(self.N) is too large
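# note: j retains its final value (excC-1) from the weights loop above, so
# this records the Ca pool of the last EE synapse of each recorded neuron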
moose.connect( self.CaTables.vec[i], 'requestOut',
self.synsEE.vec[i*self.excC+j], 'getCa')
def _setup_network(self):
## Set up the neurons without connections
ExcInhNetBase._setup_network(self)
## Now, add in the connections...
## Each pre-synaptic spike cause Vm of post-neuron to rise by
## synaptic weight in one time step i.e. delta-fn synapse.
## Since LIF neuron is derived from Compartment class,
## conductance-based synapses (SynChan class) can also be used.
## E to E synapses can be plastic
## Two ways to do this:
## 1) Each LIF neuron has one incoming postsynaptic SynHandler,
## which collects the activation from all presynaptic neurons,
## but then a common Ca pool is used.
## 2) Each LIF neuron has multiple postsyanptic SynHandlers,
## one for each pre-synaptic neuron, i.e. one per synapse,
## then each synapse has a different Ca pool.
## Here we go with option 2) as per Higgins et al 2014 (Brunel private email)
## separate SynHandler per EE synapse, thus NmaxExc*excC
if CaPlasticity:
self.synsEE = moose.GraupnerBrunel2012CaPlasticitySynHandler( \
'/network/synsEE', self.NmaxExc*self.excC )
else:
self.synsEE = moose.SimpleSynHandler( \
'/network/synsEE', self.NmaxExc*self.excC )
moose.useClock( 0, '/network/synsEE', 'process' )
## I to E synapses are not plastic
self.synsIE = moose.SimpleSynHandler( '/network/synsIE', self.NmaxExc )
## all synapses to I neurons are not plastic
self.synsI = moose.SimpleSynHandler( '/network/synsI', self.N-self.NmaxExc )
## connect all SynHandlers to their respective neurons
for i in range(self.NmaxExc):
moose.connect( self.synsIE.vec[i], 'activationOut', \
self.network.vec[i], 'activation' )
for i in range(self.NmaxExc,self.N):
moose.connect( self.synsI.vec[i-self.NmaxExc], 'activationOut', \
self.network.vec[i], 'activation' )
## Connections from some Exc/Inh neurons to each Exc neuron
for i in range(0,self.NmaxExc):
self.synsIE.vec[i].numSynapses = self.incC-self.excC
## Connections from some Exc neurons to each Exc neuron
## draw excC number of neuron indices out of NmaxExc neurons
preIdxs = random.sample(list(range(self.NmaxExc)),self.excC)
## connect these presynaptically to i-th post-synaptic neuron
for synnum,preIdx in enumerate(preIdxs):
synidx = i*self.excC+synnum
synHand = self.synsEE.vec[synidx]
## connect each synhandler to the post-synaptic neuron
moose.connect( synHand, 'activationOut', \
self.network.vec[i], 'activation' )
## important to set numSynapses = 1 for each synHandler,
## doesn't create synapses if you set the full array of SynHandlers
synHand.numSynapses = 1
synij = synHand.synapse[0]
connectExcId = moose.connect( self.network.vec[preIdx], \
'spikeOut', synij, 'addSpike')
synij.delay = syndelay
if CaPlasticity:
## set parameters for the Ca Plasticity SynHandler
## have to be set for each SynHandler
## doesn't set for full array at a time
synHand.CaInit = 0.0
synHand.tauCa = tauCa
synHand.tauSyn = tauSyn
synHand.CaPre = CaPre
synHand.CaPost = CaPost
synHand.delayD = delayD
synHand.thetaD = thetaD
synHand.thetaP = thetaP
synHand.gammaD = gammaD
synHand.gammaP = gammaP
synHand.weightMax = 1.0 # bounds on the weight
synHand.weightMin = 0.0
synHand.weightScale = \
self.J*2.0 # 0.2 mV, weight*weightScale is activation
# typically weight <~ 0.5, so activation <~ J
synHand.noisy = noisy
synHand.noiseSD = noiseSD
synHand.bistable = bistable
moose.connect( self.network.vec[i], \
'spikeOut', synHand, 'addPostSpike')
synij.weight = eqWeight # activation = weight*weightScale
# weightScale = 2*J
# weight <~ 0.5
## Randomly set 5% of them to be 1.0
if np.random.uniform()<0.05:
synij.weight = 1.0
else:
synij.weight = self.J # no weightScale here, activation = weight
## Connections from some Inh neurons to each Exc neuron
## draw inhC=incC-excC number of neuron indices out of inhibitory neurons
preIdxs = random.sample(list(range(self.NmaxExc,self.N)),self.incC-self.excC)
## connect these presynaptically to i-th post-synaptic neuron
for synnum,preIdx in enumerate(preIdxs):
synij = self.synsIE.vec[i].synapse[synnum]
connectInhId = moose.connect( self.network.vec[preIdx], \
'spikeOut', synij, 'addSpike')
synij.delay = syndelay
synij.weight = -self.scaleI*self.J # activation = weight
## Connections from some Exc/Inh neurons to each Inh neuron
for i in range(self.N-self.NmaxExc):
## each neuron has incC number of synapses
self.synsI.vec[i].numSynapses = self.incC
## draw excC number of neuron indices out of NmaxExc neurons
preIdxs = random.sample(list(range(self.NmaxExc)),self.excC)
## connect these presynaptically to i-th post-synaptic neuron
for synnum,preIdx in enumerate(preIdxs):
synij = self.synsI.vec[i].synapse[synnum]
connectExcId = moose.connect( self.network.vec[preIdx], \
'spikeOut', synij, 'addSpike')
synij.delay = syndelay
synij.weight = self.J # activation = weight
## draw inhC=incC-excC number of neuron indices out of inhibitory neurons
preIdxs = random.sample(list(range(self.NmaxExc,self.N)),self.incC-self.excC)
## connect these presynaptically to i-th post-synaptic neuron
for synnum,preIdx in enumerate(preIdxs):
synij = self.synsI.vec[i].synapse[ self.excC + synnum ]
connectInhId = moose.connect( self.network.vec[preIdx], \
'spikeOut', synij, 'addSpike')
synij.delay = syndelay
synij.weight = -self.scaleI*self.J # activation = weight
moose.useClock( 0, '/network/synsIE', 'process' )
moose.useClock( 0, '/network/synsI', 'process' )
#############################################
# Analysis functions
#############################################
def rate_from_spiketrain(spiketimes,fulltime,dt,tau=50e-3):
"""
Returns a rate series of spiketimes convolved with a Gaussian kernel;
all times must be in SI units.
"""
sigma = tau/2.
## normalized Gaussian kernel, integral with dt is normed to 1
## to count as 1 spike smeared over a finite interval
norm_factor = 1./(np.sqrt(2.*np.pi)*sigma)
gauss_kernel = np.array([norm_factor*np.exp(-x**2/(2.*sigma**2))\
for x in np.arange(-5.*sigma,5.*sigma+dt,dt)])
kernel_len = len(gauss_kernel)
## need to accommodate half kernel_len on either side of fulltime
rate_full = np.zeros(int(fulltime/dt)+kernel_len)
for spiketime in spiketimes:
idx = int(spiketime/dt)
rate_full[idx:idx+kernel_len] += gauss_kernel
## only the middle fulltime part of the rate series
## This is already in Hz:
## we should have multiplied by dt for the convolution above
## and divided by dt to get a rate, so effectively we do neither.
return rate_full[kernel_len//2:kernel_len//2+int(fulltime/dt)]
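## A hedged usage sketch (not part of the original script): a hypothetical
## ~10 Hz spike train over 1 s, sampled at dt = 1e-4 s, should yield a smoothed
## rate series that hovers around 10 Hz.
## spiketimes = np.sort(np.random.uniform(0., 0.9, size=10)) # ~10 spikes in 0.9 s
## rate = rate_from_spiketrain(spiketimes, fulltime=1.0, dt=1e-4, tau=50e-3)
## print(rate.mean()) # roughly 10 Hz, since 10 smeared spikes fall inside fulltime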
#############################################
# Make plots
#############################################
def extra_plots(net):
## extra plots apart from the spike rasters
## individual neuron Vm-s
timeseries = net.trange
## individual neuron firing rates
## population firing rates
## Ca plasticity: weight vs time plots
if CaPlasticity:
## Ca versus time in post-synaptic neurons
for i in range(net.recN): # range(net.N) is too large
net.CaTables.vec[i].xplot( 'ca.xplot', 'Ca_' + str(i) )
for i in range(net.recN): # range(net.N) is too large
for j in range(net.excC):
k = net.excC*i+j
net.weights.vec[k].xplot( 'wt.xplot', 'w_' + str(k) )
## all EE weights are used for a histogram
weights = [ net.synsEE.vec[i*net.excC+j].synapse[0].weight \
for i in range(net.NmaxExc) for j in range(net.excC) ]
histo, edges = np.histogram( weights, bins=100 )
print()
print(histo)
print()
print(edges)
print()
plt.figure()
plt.hist(weights, bins=100)
plt.title("Histogram of efficacies")
plt.xlabel("Efficacy (arb)")
plt.ylabel("# per bin")
if __name__=='__main__':
## ExcInhNetBase has unconnected neurons,
## ExcInhNet connects them
## Instantiate either ExcInhNetBase or ExcInhNet below
#net = ExcInhNetBase(N=N)
net = ExcInhNet(N=N)
print(net)
## Important to distribute the initial Vm-s
## else weak coupling gives periodic synchronous firing
net.simulate(simtime,plotif=True, v0=np.random.uniform(el-20e-3,vt,size=N))
plt.figure()
extra_plots(net)
plt.show()
|
BhallaLab/moose-examples
|
paper-2015/Fig2_elecModels/Fig2A.py
|
Python
|
gpl-2.0
| 23,553
|
[
"Gaussian",
"MOOSE",
"NEURON"
] |
e1c5d1cc020392ef76255d1d28bc2974fe768fd02c62b18caa5837129bb72a49
|
from pdbfixer import PDBFixer
from simtk.openmm.app import PDBFile
fixer = PDBFixer(pdbid='3UE4')
fixer.removeChains(chainIds=['B'])
# Without fixer.missingResidues = {}, fixer.addMissingAtoms() throws an exception,
# and if fixer.findMissingResidues() is called, several terminal residues are added
fixer.missingResidues = {}
fixer.findMissingAtoms()
fixer.addMissingAtoms()
fixer.removeHeterogens(keepWater=False)
#fixer.addMissingHydrogens(7.0)
PDBFile.writeFile(fixer.topology, fixer.positions, open('../kinases/abl/3UE4-pdbfixer.pdb', 'w'))
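# A hedged alternative sketch (not from the original script): instead of clearing
# fixer.missingResidues outright, one could call findMissingResidues() and then
# drop only the terminal gaps. The (chain_index, residue_index) key layout below
# follows the documented PDBFixer behaviour, but treat it as an assumption.
# fixer.findMissingResidues()
# chains = list(fixer.topology.chains())
# terminal_keys = [key for key in fixer.missingResidues
#                  if key[1] == 0 or key[1] == len(list(chains[key[0]].residues()))]
# for key in terminal_keys:
#     del fixer.missingResidues[key]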
|
juliebehr/kinase-benchmark
|
utils/pdbfix3UE4.py
|
Python
|
gpl-2.0
| 546
|
[
"OpenMM"
] |
453fea5839bba487e010110b7611e7a8dab82e3f8a78ef8ed855f09c58798cc5
|
'''
PyDelhi App:
- Displays Schedule: static
- Static map:
- Link to open location externally
- Talk/Workshop details
- Feedback
- Social Media:
- Facebook
- Twitter
'''
from datetime import datetime
from kivy.app import App
from kivy.clock import Clock
from kivy.properties import StringProperty
class PyDelhiApp(App):
''' Our main app class
'''
about_text = StringProperty('')
time_left = StringProperty('')
def build(self):
self.about_text = '[b]About the conference[/b]\n\nThe PyDelhi conference is hosted annually by the PyDelhi community with an aim to promote the Python programming language. We provide a single platform for users from different spheres such as students, global entrepreneurs and professionals from startups and established firms to connect and share their ideas. Experts from various domains showcase their use of Python besides discussing the recent and upcoming trends in technology.\n\n\n[b]App designed and implemented by the PyDelhi Team; visit us at [color=rgb(49,207,155)][ref=http://PyDelhi.org]http://PyDelhi.org[/ref][/color][/b]'
self.icon = 'data/icon.png'
def on_pause(self):
return True
def on_start(self):
Clock.schedule_interval(self.calc_time_left, 1)
def calc_time_left(self, dt):
td = datetime(2016, 3, 5, 9) - datetime.now()
days = td.days
hours, remainder = divmod(td.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
self.time_left = 'Days: {} {}:{}:{}'.format(days, hours, minutes, seconds)
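## Hedged note (not in the original app): once the conference start time has
## passed, the subtraction above yields a negative timedelta; a sketch to clamp:
## if td.total_seconds() <= 0:
##     self.time_left = 'The conference has started!'
##     return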
if __name__ == '__main__':
PyDelhiApp().run()
|
stalwartrishabh/PyDelhiMobile
|
pydelhiapp/main.py
|
Python
|
agpl-3.0
| 1,599
|
[
"VisIt"
] |
b379401c6a32d731803e0b5f6ea93b60be9ca0df0eb7741b04887e9c60fb765d
|
import vtk
def main():
colors = vtk.vtkNamedColors()
# create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
style = vtk.vtkInteractorStyleTrackballActor()
iren.SetInteractorStyle(style)
# create source
sphereSource = vtk.vtkSphereSource()
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(sphereSource.GetOutputPort())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(colors.GetColor3d('Chartreuse'))
# assign actor to the renderer
ren.AddActor(actor)
ren.SetBackground(colors.GetColor3d('PaleGoldenrod'))
# enable user interface interactor
iren.Initialize()
renWin.Render()
iren.Start()
if __name__ == '__main__':
main()
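# Hedged note (assumption, not part of the original example):
# vtkInteractorStyleTrackballActor lets the mouse move the actor itself;
# to get the more common camera-orbit behaviour, the style could be swapped:
# style = vtk.vtkInteractorStyleTrackballCamera()
# iren.SetInteractorStyle(style)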
|
lorensen/VTKExamples
|
src/Python/Visualization/InteractorStyleTrackballActor.py
|
Python
|
apache-2.0
| 962
|
[
"VTK"
] |
8d9a44d32216c87b417c754821f89e4ad63af7b8745164cf73317327e53e6d99
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.param.shared import *
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel',
'DecisionTreeRegressor', 'DecisionTreeRegressionModel',
'GBTRegressor', 'GBTRegressionModel',
'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel',
'GeneralizedLinearRegressionSummary', 'GeneralizedLinearRegressionTrainingSummary',
'IsotonicRegression', 'IsotonicRegressionModel',
'LinearRegression', 'LinearRegressionModel',
'LinearRegressionSummary', 'LinearRegressionTrainingSummary',
'RandomForestRegressor', 'RandomForestRegressionModel']
@inherit_doc
class LinearRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
HasRegParam, HasTol, HasElasticNetParam, HasFitIntercept,
HasStandardization, HasSolver, HasWeightCol, HasAggregationDepth, HasLoss,
JavaMLWritable, JavaMLReadable):
"""
Linear regression.
The learning objective is to minimize the specified loss function, with regularization.
This supports two kinds of loss:
* squaredError (a.k.a squared loss)
* huber (a hybrid of squared error for relatively small errors and absolute error for \
relatively large ones, and we estimate the scale parameter from training data)
This supports multiple types of regularization:
* none (a.k.a. ordinary least squares)
* L2 (ridge regression)
* L1 (Lasso)
* L2 + L1 (elastic net)
Note: Fitting with huber loss only supports none and L2 regularization.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, 2.0, Vectors.dense(1.0)),
... (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
>>> lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight")
>>> model = lr.fit(df)
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> abs(model.transform(test0).head().prediction - (-1.0)) < 0.001
True
>>> abs(model.coefficients[0] - 1.0) < 0.001
True
>>> abs(model.intercept - 0.0) < 0.001
True
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> abs(model.transform(test1).head().prediction - 1.0) < 0.001
True
>>> lr.setParams("vector")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> lr_path = temp_path + "/lr"
>>> lr.save(lr_path)
>>> lr2 = LinearRegression.load(lr_path)
>>> lr2.getMaxIter()
5
>>> model_path = temp_path + "/lr_model"
>>> model.save(model_path)
>>> model2 = LinearRegressionModel.load(model_path)
>>> model.coefficients[0] == model2.coefficients[0]
True
>>> model.intercept == model2.intercept
True
>>> model.numFeatures
1
>>> model.write().format("pmml").save(model_path + "_2")
.. versionadded:: 1.4.0
"""
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: auto, normal, l-bfgs.", typeConverter=TypeConverters.toString)
loss = Param(Params._dummy(), "loss", "The loss function to be optimized. Supported " +
"options: squaredError, huber.", typeConverter=TypeConverters.toString)
epsilon = Param(Params._dummy(), "epsilon", "The shape parameter to control the amount of " +
"robustness. Must be > 1.0. Only valid when loss is huber",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
loss="squaredError", epsilon=1.35):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
loss="squaredError", epsilon=1.35)
"""
super(LinearRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.LinearRegression", self.uid)
self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, loss="squaredError", epsilon=1.35)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
loss="squaredError", epsilon=1.35):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
loss="squaredError", epsilon=1.35)
Sets params for linear regression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return LinearRegressionModel(java_model)
@since("2.3.0")
def setEpsilon(self, value):
"""
Sets the value of :py:attr:`epsilon`.
"""
return self._set(epsilon=value)
@since("2.3.0")
def getEpsilon(self):
"""
Gets the value of epsilon or its default value.
"""
return self.getOrDefault(self.epsilon)
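# A hedged usage sketch for the huber loss (assumes the SparkSession `spark` and a
# DataFrame `df` with "label"/"features" columns, as in the doctest above):
#   hlr = LinearRegression(maxIter=10, regParam=0.1, loss="huber", epsilon=1.35)
#   hmodel = hlr.fit(df)
#   hmodel.scale   # scale estimated from the data; only meaningful for huber loss
# Note that huber loss supports only none and L2 regularization, so
# elasticNetParam must stay at its default of 0.0.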
class LinearRegressionModel(JavaModel, JavaPredictionModel, GeneralJavaMLWritable, JavaMLReadable):
"""
Model fitted by :class:`LinearRegression`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients.
"""
return self._call_java("coefficients")
@property
@since("1.4.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("2.3.0")
def scale(self):
"""
The value by which \|y - X'w\| is scaled down when loss is "huber", otherwise 1.0.
"""
return self._call_java("scale")
@property
@since("2.0.0")
def summary(self):
"""
Gets summary (e.g. residuals, mse, r-squared) of model on
training set. An exception is thrown if
`trainingSummary is None`.
"""
if self.hasSummary:
java_lrt_summary = self._call_java("summary")
return LinearRegressionTrainingSummary(java_lrt_summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@property
@since("2.0.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@since("2.0.0")
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_lr_summary = self._call_java("evaluate", dataset)
return LinearRegressionSummary(java_lr_summary)
class LinearRegressionSummary(JavaWrapper):
"""
.. note:: Experimental
Linear regression results evaluated on a dataset.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def predictions(self):
"""
Dataframe outputted by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.0.0")
def predictionCol(self):
"""
Field in "predictions" which gives the predicted value of
the label at each instance.
"""
return self._call_java("predictionCol")
@property
@since("2.0.0")
def labelCol(self):
"""
Field in "predictions" which gives the true label of each
instance.
"""
return self._call_java("labelCol")
@property
@since("2.0.0")
def featuresCol(self):
"""
Field in "predictions" which gives the features of each instance
as a vector.
"""
return self._call_java("featuresCol")
@property
@since("2.0.0")
def explainedVariance(self):
"""
Returns the explained variance regression score.
explainedVariance = 1 - variance(y - \hat{y}) / variance(y)
.. seealso:: `Wikipedia explain variation \
<http://en.wikipedia.org/wiki/Explained_variation>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("explainedVariance")
@property
@since("2.0.0")
def meanAbsoluteError(self):
"""
Returns the mean absolute error, which is a risk function
corresponding to the expected value of the absolute error
loss or l1-norm loss.
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("meanAbsoluteError")
@property
@since("2.0.0")
def meanSquaredError(self):
"""
Returns the mean squared error, which is a risk function
corresponding to the expected value of the squared error
loss or quadratic loss.
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("meanSquaredError")
@property
@since("2.0.0")
def rootMeanSquaredError(self):
"""
Returns the root mean squared error, which is defined as the
square root of the mean squared error.
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("rootMeanSquaredError")
@property
@since("2.0.0")
def r2(self):
"""
Returns R^2, the coefficient of determination.
.. seealso:: `Wikipedia coefficient of determination \
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("r2")
@property
@since("2.4.0")
def r2adj(self):
"""
Returns Adjusted R^2, the adjusted coefficient of determination.
.. seealso:: `Wikipedia coefficient of determination, Adjusted R^2 \
<https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark versions.
"""
return self._call_java("r2adj")
@property
@since("2.0.0")
def residuals(self):
"""
Residuals (label - predicted value)
"""
return self._call_java("residuals")
@property
@since("2.0.0")
def numInstances(self):
"""
Number of instances in DataFrame predictions
"""
return self._call_java("numInstances")
@property
@since("2.2.0")
def degreesOfFreedom(self):
"""
Degrees of freedom.
"""
return self._call_java("degreesOfFreedom")
@property
@since("2.0.0")
def devianceResiduals(self):
"""
The weighted residuals, the usual residuals rescaled by the
square root of the instance weights.
"""
return self._call_java("devianceResiduals")
@property
@since("2.0.0")
def coefficientStandardErrors(self):
"""
Standard error of estimated coefficients and intercept.
This value is only available when using the "normal" solver.
If :py:attr:`LinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("coefficientStandardErrors")
@property
@since("2.0.0")
def tValues(self):
"""
T-statistic of estimated coefficients and intercept.
This value is only available when using the "normal" solver.
If :py:attr:`LinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("tValues")
@property
@since("2.0.0")
def pValues(self):
"""
Two-sided p-value of estimated coefficients and intercept.
This value is only available when using the "normal" solver.
If :py:attr:`LinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("pValues")
@inherit_doc
class LinearRegressionTrainingSummary(LinearRegressionSummary):
"""
.. note:: Experimental
Linear regression training results. Currently, the training summary ignores the
training weights except for the objective trace.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def objectiveHistory(self):
"""
Objective function (scaled loss + regularization) at each
iteration.
This value is only available when using the "l-bfgs" solver.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("objectiveHistory")
@property
@since("2.0.0")
def totalIterations(self):
"""
Number of training iterations until termination.
This value is only available when using the "l-bfgs" solver.
.. seealso:: :py:attr:`LinearRegression.solver`
"""
return self._call_java("totalIterations")
@inherit_doc
class IsotonicRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasWeightCol, JavaMLWritable, JavaMLReadable):
"""
Currently implemented using parallelized pool adjacent violators algorithm.
Only univariate (single feature) algorithm supported.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> ir = IsotonicRegression()
>>> model = ir.fit(df)
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> model.boundaries
DenseVector([0.0, 1.0])
>>> ir_path = temp_path + "/ir"
>>> ir.save(ir_path)
>>> ir2 = IsotonicRegression.load(ir_path)
>>> ir2.getIsotonic()
True
>>> model_path = temp_path + "/ir_model"
>>> model.save(model_path)
>>> model2 = IsotonicRegressionModel.load(model_path)
>>> model.boundaries == model2.boundaries
True
>>> model.predictions == model2.predictions
True
.. versionadded:: 1.6.0
"""
isotonic = \
Param(Params._dummy(), "isotonic",
"whether the output sequence should be isotonic/increasing (true) or" +
"antitonic/decreasing (false).", typeConverter=TypeConverters.toBoolean)
featureIndex = \
Param(Params._dummy(), "featureIndex",
"The index of the feature if featuresCol is a vector column, no effect otherwise.",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
weightCol=None, isotonic=True, featureIndex=0):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
weightCol=None, isotonic=True, featureIndex=0):
"""
super(IsotonicRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.IsotonicRegression", self.uid)
self._setDefault(isotonic=True, featureIndex=0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
weightCol=None, isotonic=True, featureIndex=0):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
weightCol=None, isotonic=True, featureIndex=0):
Set the params for IsotonicRegression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return IsotonicRegressionModel(java_model)
def setIsotonic(self, value):
"""
Sets the value of :py:attr:`isotonic`.
"""
return self._set(isotonic=value)
def getIsotonic(self):
"""
Gets the value of isotonic or its default value.
"""
return self.getOrDefault(self.isotonic)
def setFeatureIndex(self, value):
"""
Sets the value of :py:attr:`featureIndex`.
"""
return self._set(featureIndex=value)
def getFeatureIndex(self):
"""
Gets the value of featureIndex or its default value.
"""
return self.getOrDefault(self.featureIndex)
class IsotonicRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by :class:`IsotonicRegression`.
.. versionadded:: 1.6.0
"""
@property
@since("1.6.0")
def boundaries(self):
"""
Boundaries in increasing order for which predictions are known.
"""
return self._call_java("boundaries")
@property
@since("1.6.0")
def predictions(self):
"""
Predictions associated with the boundaries at the same index, monotone because of isotonic
regression.
"""
return self._call_java("predictions")
class TreeEnsembleParams(DecisionTreeParams):
"""
Mixin for Decision Tree-based ensemble algorithms parameters.
"""
subsamplingRate = Param(Params._dummy(), "subsamplingRate", "Fraction of the training data " +
"used for learning each decision tree, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
supportedFeatureSubsetStrategies = ["auto", "all", "onethird", "sqrt", "log2"]
featureSubsetStrategy = \
Param(Params._dummy(), "featureSubsetStrategy",
"The number of features to consider for splits at each tree node. Supported " +
"options: " + ", ".join(supportedFeatureSubsetStrategies) + ", (0.0-1.0], [1-n].",
typeConverter=TypeConverters.toString)
def __init__(self):
super(TreeEnsembleParams, self).__init__()
@since("1.4.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
"""
return self._set(subsamplingRate=value)
@since("1.4.0")
def getSubsamplingRate(self):
"""
Gets the value of subsamplingRate or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("1.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
.. note:: Deprecated in 2.4.0 and will be removed in 3.0.0.
"""
return self._set(featureSubsetStrategy=value)
@since("1.4.0")
def getFeatureSubsetStrategy(self):
"""
Gets the value of featureSubsetStrategy or its default value.
"""
return self.getOrDefault(self.featureSubsetStrategy)
class TreeRegressorParams(Params):
"""
Private class to track supported impurity measures.
"""
supportedImpurities = ["variance"]
impurity = Param(Params._dummy(), "impurity",
"Criterion used for information gain calculation (case-insensitive). " +
"Supported options: " +
", ".join(supportedImpurities), typeConverter=TypeConverters.toString)
def __init__(self):
super(TreeRegressorParams, self).__init__()
@since("1.4.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.4.0")
def getImpurity(self):
"""
Gets the value of impurity or its default value.
"""
return self.getOrDefault(self.impurity)
class RandomForestParams(TreeEnsembleParams):
"""
Private class to track supported random forest parameters.
"""
numTrees = Param(Params._dummy(), "numTrees", "Number of trees to train (>= 1).",
typeConverter=TypeConverters.toInt)
def __init__(self):
super(RandomForestParams, self).__init__()
@since("1.4.0")
def setNumTrees(self, value):
"""
Sets the value of :py:attr:`numTrees`.
"""
return self._set(numTrees=value)
@since("1.4.0")
def getNumTrees(self):
"""
Gets the value of numTrees or its default value.
"""
return self.getOrDefault(self.numTrees)
class GBTParams(TreeEnsembleParams):
"""
Private class to track supported GBT params.
"""
supportedLossTypes = ["squared", "absolute"]
@inherit_doc
class DecisionTreeRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
DecisionTreeParams, TreeRegressorParams, HasCheckpointInterval,
HasSeed, JavaMLWritable, JavaMLReadable, HasVarianceCol):
"""
`Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
learning algorithm for regression.
It supports both continuous and categorical features.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> dt = DecisionTreeRegressor(maxDepth=2, varianceCol="variance")
>>> model = dt.fit(df)
>>> model.depth
1
>>> model.numNodes
3
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> dtr_path = temp_path + "/dtr"
>>> dt.save(dtr_path)
>>> dt2 = DecisionTreeRegressor.load(dtr_path)
>>> dt2.getMaxDepth()
2
>>> model_path = temp_path + "/dtr_model"
>>> model.save(model_path)
>>> model2 = DecisionTreeRegressionModel.load(model_path)
>>> model.numNodes == model2.numNodes
True
>>> model.depth == model2.depth
True
>>> model.transform(test1).head().variance
0.0
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance",
seed=None, varianceCol=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", seed=None, varianceCol=None)
"""
super(DecisionTreeRegressor, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.DecisionTreeRegressor", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", seed=None, varianceCol=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", seed=None, varianceCol=None)
Sets params for the DecisionTreeRegressor.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return DecisionTreeRegressionModel(java_model)
@inherit_doc
class DecisionTreeModel(JavaModel, JavaPredictionModel):
"""
Abstraction for Decision Tree models.
.. versionadded:: 1.5.0
"""
@property
@since("1.5.0")
def numNodes(self):
"""Return number of nodes of the decision tree."""
return self._call_java("numNodes")
@property
@since("1.5.0")
def depth(self):
"""Return depth of the decision tree."""
return self._call_java("depth")
@property
@since("2.0.0")
def toDebugString(self):
"""Full description of model."""
return self._call_java("toDebugString")
def __repr__(self):
return self._call_java("toString")
@inherit_doc
class TreeEnsembleModel(JavaModel):
"""
(private abstraction)
Represents a tree ensemble model.
"""
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeModel(m) for m in list(self._call_java("trees"))]
@property
@since("2.0.0")
def getNumTrees(self):
"""Number of trees in ensemble."""
return self._call_java("getNumTrees")
@property
@since("1.5.0")
def treeWeights(self):
"""Return the weights for each tree"""
return list(self._call_java("javaTreeWeights"))
@property
@since("2.0.0")
def totalNumNodes(self):
"""Total number of nodes, summed over all trees in the ensemble."""
return self._call_java("totalNumNodes")
@property
@since("2.0.0")
def toDebugString(self):
"""Full description of model."""
return self._call_java("toDebugString")
def __repr__(self):
return self._call_java("toString")
@inherit_doc
class DecisionTreeRegressionModel(DecisionTreeModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by :class:`DecisionTreeRegressor`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
This generalizes the idea of "Gini" importance to other losses,
following the explanation of Gini importance from "Random Forests" documentation
by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
This feature importance is calculated as follows:
- importance(feature j) = sum (over nodes which split on feature j) of the gain,
where gain is scaled by the number of instances passing through node
- Normalize importances for tree to sum to 1.
.. note:: Feature importance for single decision trees can have high variance due to
correlated predictor variables. Consider using a :py:class:`RandomForestRegressor`
to determine feature importance instead.
"""
return self._call_java("featureImportances")
@inherit_doc
class RandomForestRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasSeed,
RandomForestParams, TreeRegressorParams, HasCheckpointInterval,
JavaMLWritable, JavaMLReadable):
"""
`Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
learning algorithm for regression.
It supports both continuous and categorical features.
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> rf = RandomForestRegressor(numTrees=2, maxDepth=2, seed=42)
>>> model = rf.fit(df)
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 1.0])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> model.numFeatures
1
>>> model.trees
[DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]
>>> model.getNumTrees
2
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
0.5
>>> rfr_path = temp_path + "/rfr"
>>> rf.save(rfr_path)
>>> rf2 = RandomForestRegressor.load(rfr_path)
>>> rf2.getNumTrees()
2
>>> model_path = temp_path + "/rfr_model"
>>> model.save(model_path)
>>> model2 = RandomForestRegressionModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
featureSubsetStrategy="auto"):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
featureSubsetStrategy="auto")
"""
super(RandomForestRegressor, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.RandomForestRegressor", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", subsamplingRate=1.0, numTrees=20,
featureSubsetStrategy="auto")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
featureSubsetStrategy="auto"):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
featureSubsetStrategy="auto")
Sets params for RandomForestRegressor.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return RandomForestRegressionModel(java_model)
@since("2.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self._set(featureSubsetStrategy=value)
class RandomForestRegressionModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by :class:`RandomForestRegressor`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
"""
return self._call_java("featureImportances")
@inherit_doc
class GBTRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
GBTParams, HasCheckpointInterval, HasStepSize, HasSeed, JavaMLWritable,
JavaMLReadable, TreeRegressorParams):
"""
`Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
learning algorithm for regression.
It supports both continuous and categorical features.
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> gbt = GBTRegressor(maxIter=5, maxDepth=2, seed=42)
>>> print(gbt.getImpurity())
variance
>>> print(gbt.getFeatureSubsetStrategy())
all
>>> model = gbt.fit(df)
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> gbtr_path = temp_path + "gbtr"
>>> gbt.save(gbtr_path)
>>> gbt2 = GBTRegressor.load(gbtr_path)
>>> gbt2.getMaxDepth()
2
>>> model_path = temp_path + "gbtr_model"
>>> model.save(model_path)
>>> model2 = GBTRegressionModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.treeWeights == model2.treeWeights
True
>>> model.trees
[DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]
>>> validation = spark.createDataFrame([(0.0, Vectors.dense(-1.0))],
... ["label", "features"])
>>> model.evaluateEachIteration(validation, "squared")
[0.0, 0.0, 0.0, 0.0, 0.0]
.. versionadded:: 1.4.0
"""
lossType = Param(Params._dummy(), "lossType",
"Loss function which GBT tries to minimize (case-insensitive). " +
"Supported options: " + ", ".join(GBTParams.supportedLossTypes),
typeConverter=TypeConverters.toString)
stepSize = Param(Params._dummy(), "stepSize",
"Step size (a.k.a. learning rate) in interval (0, 1] for shrinking " +
"the contribution of each estimator.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
impurity="variance", featureSubsetStrategy="all"):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
impurity="variance", featureSubsetStrategy="all")
"""
super(GBTRegressor, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.regression.GBTRegressor", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1,
impurity="variance", featureSubsetStrategy="all")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
impuriy="variance", featureSubsetStrategy="all"):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
impurity="variance", featureSubsetStrategy="all")
Sets params for Gradient Boosted Tree Regression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GBTRegressionModel(java_model)
@since("1.4.0")
def setLossType(self, value):
"""
Sets the value of :py:attr:`lossType`.
"""
return self._set(lossType=value)
@since("1.4.0")
def getLossType(self):
"""
Gets the value of lossType or its default value.
"""
return self.getOrDefault(self.lossType)
@since("2.4.0")
def setFeatureSubsetStrategy(self, value):
"""
Sets the value of :py:attr:`featureSubsetStrategy`.
"""
return self._set(featureSubsetStrategy=value)
class GBTRegressionModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by :class:`GBTRegressor`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@since("2.4.0")
def evaluateEachIteration(self, dataset, loss):
"""
Method to compute error or loss for every iteration of gradient boosting.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
:param loss:
The loss function used to compute error.
Supported options: squared, absolute
"""
return self._call_java("evaluateEachIteration", dataset, loss)
@inherit_doc
class AFTSurvivalRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasFitIntercept, HasMaxIter, HasTol, HasAggregationDepth,
JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
Accelerated Failure Time (AFT) Model Survival Regression
Fit a parametric AFT survival regression model based on the Weibull distribution
of the survival time.
.. seealso:: `AFT Model <https://en.wikipedia.org/wiki/Accelerated_failure_time_model>`_
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0), 1.0),
... (1e-40, Vectors.sparse(1, [], []), 0.0)], ["label", "features", "censor"])
>>> aftsr = AFTSurvivalRegression()
>>> model = aftsr.fit(df)
>>> model.predict(Vectors.dense(6.3))
1.0
>>> model.predictQuantiles(Vectors.dense(6.3))
DenseVector([0.0101, 0.0513, 0.1054, 0.2877, 0.6931, 1.3863, 2.3026, 2.9957, 4.6052])
>>> model.transform(df).show()
+-------+---------+------+----------+
| label| features|censor|prediction|
+-------+---------+------+----------+
| 1.0| [1.0]| 1.0| 1.0|
|1.0E-40|(1,[],[])| 0.0| 1.0|
+-------+---------+------+----------+
...
>>> aftsr_path = temp_path + "/aftsr"
>>> aftsr.save(aftsr_path)
>>> aftsr2 = AFTSurvivalRegression.load(aftsr_path)
>>> aftsr2.getMaxIter()
100
>>> model_path = temp_path + "/aftsr_model"
>>> model.save(model_path)
>>> model2 = AFTSurvivalRegressionModel.load(model_path)
>>> model.coefficients == model2.coefficients
True
>>> model.intercept == model2.intercept
True
>>> model.scale == model2.scale
True
.. versionadded:: 1.6.0
"""
censorCol = Param(Params._dummy(), "censorCol",
"censor column name. The value of this column could be 0 or 1. " +
"If the value is 1, it means the event has occurred i.e. " +
"uncensored; otherwise censored.", typeConverter=TypeConverters.toString)
quantileProbabilities = \
Param(Params._dummy(), "quantileProbabilities",
"quantile probabilities array. Values of the quantile probabilities array " +
"should be in the range (0, 1) and the array should be non-empty.",
typeConverter=TypeConverters.toListFloat)
quantilesCol = Param(Params._dummy(), "quantilesCol",
"quantiles column name. This column will output quantiles of " +
"corresponding quantileProbabilities if it is set.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]),
quantilesCol=None, aggregationDepth=2):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
quantilesCol=None, aggregationDepth=2)
"""
super(AFTSurvivalRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.AFTSurvivalRegression", self.uid)
self._setDefault(censorCol="censor",
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
maxIter=100, tol=1E-6)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]),
quantilesCol=None, aggregationDepth=2):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
quantilesCol=None, aggregationDepth=2):
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return AFTSurvivalRegressionModel(java_model)
@since("1.6.0")
def setCensorCol(self, value):
"""
Sets the value of :py:attr:`censorCol`.
"""
return self._set(censorCol=value)
@since("1.6.0")
def getCensorCol(self):
"""
Gets the value of censorCol or its default value.
"""
return self.getOrDefault(self.censorCol)
@since("1.6.0")
def setQuantileProbabilities(self, value):
"""
Sets the value of :py:attr:`quantileProbabilities`.
"""
return self._set(quantileProbabilities=value)
@since("1.6.0")
def getQuantileProbabilities(self):
"""
Gets the value of quantileProbabilities or its default value.
"""
return self.getOrDefault(self.quantileProbabilities)
@since("1.6.0")
def setQuantilesCol(self, value):
"""
Sets the value of :py:attr:`quantilesCol`.
"""
return self._set(quantilesCol=value)
@since("1.6.0")
def getQuantilesCol(self):
"""
Gets the value of quantilesCol or its default value.
"""
return self.getOrDefault(self.quantilesCol)
class AFTSurvivalRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
Model fitted by :class:`AFTSurvivalRegression`.
.. versionadded:: 1.6.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients.
"""
return self._call_java("coefficients")
@property
@since("1.6.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("1.6.0")
def scale(self):
"""
Model scale parameter.
"""
return self._call_java("scale")
@since("2.0.0")
def predictQuantiles(self, features):
"""
Predicted Quantiles
"""
return self._call_java("predictQuantiles", features)
@since("2.0.0")
def predict(self, features):
"""
Predicted value
"""
return self._call_java("predict", features)
@inherit_doc
class GeneralizedLinearRegression(JavaEstimator, HasLabelCol, HasFeaturesCol, HasPredictionCol,
HasFitIntercept, HasMaxIter, HasTol, HasRegParam, HasWeightCol,
HasSolver, JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
Generalized Linear Regression.
Fit a Generalized Linear Model specified by giving a symbolic description of the linear
predictor (link function) and a description of the error distribution (family). It supports
"gaussian", "binomial", "poisson", "gamma" and "tweedie" as family. Valid link functions for
each family is listed below. The first link function of each family is the default one.
* "gaussian" -> "identity", "log", "inverse"
* "binomial" -> "logit", "probit", "cloglog"
* "poisson" -> "log", "identity", "sqrt"
* "gamma" -> "inverse", "identity", "log"
* "tweedie" -> power link function specified through "linkPower". \
The default link power in the tweedie family is 1 - variancePower.
.. seealso:: `GLM <https://en.wikipedia.org/wiki/Generalized_linear_model>`_
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(0.0, 0.0)),
... (1.0, Vectors.dense(1.0, 2.0)),
... (2.0, Vectors.dense(0.0, 0.0)),
... (2.0, Vectors.dense(1.0, 1.0)),], ["label", "features"])
>>> glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p")
>>> model = glr.fit(df)
>>> transformed = model.transform(df)
>>> abs(transformed.head().prediction - 1.5) < 0.001
True
>>> abs(transformed.head().p - 1.5) < 0.001
True
>>> model.coefficients
DenseVector([1.5..., -1.0...])
>>> model.numFeatures
2
>>> abs(model.intercept - 1.5) < 0.001
True
>>> glr_path = temp_path + "/glr"
>>> glr.save(glr_path)
>>> glr2 = GeneralizedLinearRegression.load(glr_path)
>>> glr.getFamily() == glr2.getFamily()
True
>>> model_path = temp_path + "/glr_model"
>>> model.save(model_path)
>>> model2 = GeneralizedLinearRegressionModel.load(model_path)
>>> model.intercept == model2.intercept
True
>>> model.coefficients[0] == model2.coefficients[0]
True
.. versionadded:: 2.0.0
"""
family = Param(Params._dummy(), "family", "The name of family which is a description of " +
"the error distribution to be used in the model. Supported options: " +
"gaussian (default), binomial, poisson, gamma and tweedie.",
typeConverter=TypeConverters.toString)
link = Param(Params._dummy(), "link", "The name of link function which provides the " +
"relationship between the linear predictor and the mean of the distribution " +
"function. Supported options: identity, log, inverse, logit, probit, cloglog " +
"and sqrt.", typeConverter=TypeConverters.toString)
linkPredictionCol = Param(Params._dummy(), "linkPredictionCol", "link prediction (linear " +
"predictor) column name", typeConverter=TypeConverters.toString)
variancePower = Param(Params._dummy(), "variancePower", "The power in the variance function " +
"of the Tweedie distribution which characterizes the relationship " +
"between the variance and mean of the distribution. Only applicable " +
"for the Tweedie family. Supported values: 0 and [1, Inf).",
typeConverter=TypeConverters.toFloat)
linkPower = Param(Params._dummy(), "linkPower", "The index in the power link function. " +
"Only applicable to the Tweedie family.",
typeConverter=TypeConverters.toFloat)
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: irls.", typeConverter=TypeConverters.toString)
offsetCol = Param(Params._dummy(), "offsetCol", "The offset column name. If this is not set " +
"or empty, we treat all instance offsets as 0.0",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction",
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
variancePower=0.0, linkPower=None, offsetCol=None):
"""
__init__(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
variancePower=0.0, linkPower=None, offsetCol=None)
"""
super(GeneralizedLinearRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.GeneralizedLinearRegression", self.uid)
self._setDefault(family="gaussian", maxIter=25, tol=1e-6, regParam=0.0, solver="irls",
variancePower=0.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction",
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
variancePower=0.0, linkPower=None, offsetCol=None):
"""
setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
variancePower=0.0, linkPower=None, offsetCol=None)
Sets params for generalized linear regression.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GeneralizedLinearRegressionModel(java_model)
@since("2.0.0")
def setFamily(self, value):
"""
Sets the value of :py:attr:`family`.
"""
return self._set(family=value)
@since("2.0.0")
def getFamily(self):
"""
Gets the value of family or its default value.
"""
return self.getOrDefault(self.family)
@since("2.0.0")
def setLinkPredictionCol(self, value):
"""
Sets the value of :py:attr:`linkPredictionCol`.
"""
return self._set(linkPredictionCol=value)
@since("2.0.0")
def getLinkPredictionCol(self):
"""
Gets the value of linkPredictionCol or its default value.
"""
return self.getOrDefault(self.linkPredictionCol)
@since("2.0.0")
def setLink(self, value):
"""
Sets the value of :py:attr:`link`.
"""
return self._set(link=value)
@since("2.0.0")
def getLink(self):
"""
Gets the value of link or its default value.
"""
return self.getOrDefault(self.link)
@since("2.2.0")
def setVariancePower(self, value):
"""
Sets the value of :py:attr:`variancePower`.
"""
return self._set(variancePower=value)
@since("2.2.0")
def getVariancePower(self):
"""
Gets the value of variancePower or its default value.
"""
return self.getOrDefault(self.variancePower)
@since("2.2.0")
def setLinkPower(self, value):
"""
Sets the value of :py:attr:`linkPower`.
"""
return self._set(linkPower=value)
@since("2.2.0")
def getLinkPower(self):
"""
Gets the value of linkPower or its default value.
"""
return self.getOrDefault(self.linkPower)
@since("2.3.0")
def setOffsetCol(self, value):
"""
Sets the value of :py:attr:`offsetCol`.
"""
return self._set(offsetCol=value)
@since("2.3.0")
def getOffsetCol(self):
"""
Gets the value of offsetCol or its default value.
"""
return self.getOrDefault(self.offsetCol)
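# Hedged sketch of a Tweedie fit (assumes a DataFrame `df` of non-negative labels
# with "label"/"features" columns):
#   tweedie = GeneralizedLinearRegression(family="tweedie", variancePower=1.5)
#   # the default link power is 1 - variancePower, i.e. -0.5 here
#   tmodel = tweedie.fit(df)
# Note that `link` must not be set when family="tweedie"; use linkPower instead.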
class GeneralizedLinearRegressionModel(JavaModel, JavaPredictionModel, JavaMLWritable,
JavaMLReadable):
"""
.. note:: Experimental
Model fitted by :class:`GeneralizedLinearRegression`.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients.
"""
return self._call_java("coefficients")
@property
@since("2.0.0")
def intercept(self):
"""
Model intercept.
"""
return self._call_java("intercept")
@property
@since("2.0.0")
def summary(self):
"""
Gets summary (e.g. residuals, deviance, pValues) of model on
training set. An exception is thrown if
`trainingSummary is None`.
"""
if self.hasSummary:
java_glrt_summary = self._call_java("summary")
return GeneralizedLinearRegressionTrainingSummary(java_glrt_summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@property
@since("2.0.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@since("2.0.0")
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_glr_summary = self._call_java("evaluate", dataset)
return GeneralizedLinearRegressionSummary(java_glr_summary)
class GeneralizedLinearRegressionSummary(JavaWrapper):
"""
.. note:: Experimental
Generalized linear regression results evaluated on a dataset.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def predictions(self):
"""
Predictions output by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.0.0")
def predictionCol(self):
"""
Field in :py:attr:`predictions` which gives the predicted value of each instance.
This is set to a new column name if the original model's `predictionCol` is not set.
"""
return self._call_java("predictionCol")
@property
@since("2.2.0")
def numInstances(self):
"""
Number of instances in DataFrame predictions.
"""
return self._call_java("numInstances")
@property
@since("2.0.0")
def rank(self):
"""
The numeric rank of the fitted linear model.
"""
return self._call_java("rank")
@property
@since("2.0.0")
def degreesOfFreedom(self):
"""
Degrees of freedom.
"""
return self._call_java("degreesOfFreedom")
@property
@since("2.0.0")
def residualDegreeOfFreedom(self):
"""
The residual degrees of freedom.
"""
return self._call_java("residualDegreeOfFreedom")
@property
@since("2.0.0")
def residualDegreeOfFreedomNull(self):
"""
The residual degrees of freedom for the null model.
"""
return self._call_java("residualDegreeOfFreedomNull")
@since("2.0.0")
def residuals(self, residualsType="deviance"):
"""
Get the residuals of the fitted model by type.
:param residualsType: The type of residuals which should be returned.
Supported options: deviance (default), pearson, working, and response.
"""
return self._call_java("residuals", residualsType)
@property
@since("2.0.0")
def nullDeviance(self):
"""
The deviance for the null model.
"""
return self._call_java("nullDeviance")
@property
@since("2.0.0")
def deviance(self):
"""
The deviance for the fitted model.
"""
return self._call_java("deviance")
@property
@since("2.0.0")
def dispersion(self):
"""
The dispersion of the fitted model.
It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise
estimated by the residual Pearson's Chi-Squared statistic (which is defined as
sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.
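        As a sketch: dispersion == sum(r_i ** 2) / residualDegreeOfFreedom,
        where the r_i are the Pearson residuals (for families other than
        "binomial" and "poisson").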
"""
return self._call_java("dispersion")
@property
@since("2.0.0")
def aic(self):
"""
        Akaike's "An Information Criterion" (AIC) for the fitted model.
"""
return self._call_java("aic")
@inherit_doc
class GeneralizedLinearRegressionTrainingSummary(GeneralizedLinearRegressionSummary):
"""
.. note:: Experimental
Generalized linear regression training results.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def numIterations(self):
"""
Number of training iterations.
"""
return self._call_java("numIterations")
@property
@since("2.0.0")
def solver(self):
"""
The numeric solver used for training.
"""
return self._call_java("solver")
@property
@since("2.0.0")
def coefficientStandardErrors(self):
"""
Standard error of estimated coefficients and intercept.
If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
"""
return self._call_java("coefficientStandardErrors")
@property
@since("2.0.0")
def tValues(self):
"""
T-statistic of estimated coefficients and intercept.
If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
"""
return self._call_java("tValues")
@property
@since("2.0.0")
def pValues(self):
"""
Two-sided p-value of estimated coefficients and intercept.
If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
"""
return self._call_java("pValues")
def __repr__(self):
return self._call_java("toString")
if __name__ == "__main__":
import doctest
import pyspark.ml.regression
from pyspark.sql import SparkSession
globs = pyspark.ml.regression.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.regression tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
|
tejasapatil/spark
|
python/pyspark/ml/regression.py
|
Python
|
apache-2.0
| 66,081
|
[
"Gaussian"
] |
bf59a0ff815b28d5eb0a72a96ffc4203e99652ded53e3deeda2d90e64abae94b
|
# Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import moldesign as mdt
import moldesign.units as u
from moldesign import utils
PACKAGEPATH = os.path.abspath(os.path.dirname(mdt.__file__))
##### ATOM DATA
ATOMIC_NUMBERS = {'Ac': 89, 'Ag': 47, 'Al': 13, 'Am': 95, 'Ar': 18, 'As': 33, 'At': 85, 'Au': 79,
'B': 5, 'Ba': 56, 'Be': 4, 'Bh': 107, 'Bi': 83, 'Bk': 97, 'Br': 35, 'C': 6,
'Ca': 20, 'Cd': 48, 'Ce': 58, 'Cf': 98, 'Cl': 17, 'Cm': 96, 'Cn': 112, 'Co': 27,
'Cr': 24, 'Cs': 55, 'Cu': 29, 'Db': 105, 'Ds': 110, 'Dy': 66, 'Er': 68, 'Es': 99,
'Eu': 63, 'F': 9, 'Fe': 26, 'Fm': 100, 'Fr': 87, 'Ga': 31, 'Gd': 64, 'Ge': 32,
'H': 1, 'He': 2, 'Hf': 72, 'Hg': 80, 'Ho': 67, 'Hs': 108, 'I': 53, 'In': 49, 'Ir': 77,
'K': 19, 'Kr': 36, 'La': 57, 'Li': 3, 'Lr': 103, 'Lu': 71, 'Md': 101, 'Mg': 12,
'Mn': 25, 'Mo': 42, 'Mt': 109, 'N': 7, 'Na': 11, 'Nb': 41, 'Nd': 60, 'Ne': 10,
'Ni': 28, 'No': 102, 'Np': 93, 'O': 8, 'Os': 76, 'P': 15, 'Pa': 91, 'Pb': 82,
'Pd': 46, 'Pm': 61, 'Po': 84, 'Pr': 59, 'Pt': 78, 'Pu': 94, 'Ra': 88, 'Rb': 37,
'Re': 75, 'Rf': 104, 'Rg': 111, 'Rh': 45, 'Rn': 86, 'Ru': 44, 'S': 16, 'Sb': 51,
'Sc': 21, 'Se': 34, 'Sg': 106, 'Si': 14, 'Sm': 62, 'Sn': 50, 'Sr': 38, 'Ta': 73,
'Tb': 65, 'Tc': 43, 'Te': 52, 'Th': 90, 'Ti': 22, 'Tl': 81, 'Tm': 69, 'U': 92,
'Uuh': 116, 'Uuo': 118, 'Uup': 115, 'Uuq': 114, 'Uus': 117, 'Uut': 113, 'V': 23,
'W': 74, 'Xe': 54, 'Y': 39, 'Yb': 70, 'Zn': 30, 'Zr': 40}
ELEMENTS = {val: key for key, val in ATOMIC_NUMBERS.iteritems()}
SYMBOLS = ELEMENTS
# Isotopic masses for the most abundant species of each element
# from https://www.ncsu.edu/chemistry/msf/pdf/IsotopicMass_NaturalAbundance.pdf
ATOMIC_MASSES = {i: m*u.amu for i, m in zip(xrange(1, 55), (
1.007825, 4.002603, 7.016004, 9.012182, 11.009305, 12.0, 14.003074, 15.994915, 18.998403,
19.99244, 22.98977,
23.985042, 26.981538, 27.976927, 30.973762, 31.972071, 34.968853, 39.962383, 38.963707,
39.962591, 44.95591,
47.947947, 50.943964, 51.940512, 54.93805, 55.934942, 58.9332, 57.935348, 62.929601, 63.929147,
68.925581,
73.921178, 74.921596, 79.916522, 78.918338, 83.911507, 84.911789, 87.905614, 88.905848,
89.904704, 92.906378,
97.905408, 97.907216, 101.90435, 102.905504, 107.903894, 106.905093, 113.903358, 114.903878,
119.902197, 120.903818,
129.906223, 126.904468, 131.904154))}
for atnum, mass in ATOMIC_MASSES.items():
ATOMIC_MASSES[ELEMENTS[atnum]] = mass # index by atnum and symbol
ATOMIC_MASSES[-1] = -1.0 * u.amu
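# Quick sanity check of the dual indexing above: ATOMIC_MASSES[6] and
# ATOMIC_MASSES['C'] both return 12.0 amu (carbon-12).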
########## BIOPOLYMERS
# TODO: regenerate all this data to using the PDB Chemical Component Dictionary
BASES = 'C T G U A I'.split()
ALL_BASES = BASES + ['%s5' % b for b in BASES] + ['%s3' % b for b in BASES]
DBASES = ['D%s' % b for b in ALL_BASES]
RBASES = ['R%s' % b for b in ALL_BASES]
BACKBONES = {'dna': set(("P OP1 OP2 O5' O4' C5' C4' C3' O3' C2' C1' H1' H2'' H2' H3' H4' H5' H5'' "
"HO5' HO3'").split()),
'protein': set("N CA C O OXT H HA HA2 HA3 H2 H3".split())}
RESIDUE_ONE_LETTER = dict(ALA="A", ASX="B", CYS="C", ASP="D",
GLU="E", PHE="F", GLY="G", HIS="H", ILE="I",
LYS="K", LEU="L", MET="M", ASN="N", PRO="P",
GLN="Q", ARG="R", SER="S", THR="T", VAL="V",
TRP="W", XAA="X", TYR="Y", GLX="Z")
BIOPOLYMER_TYPES = set('dna rna protein'.split())
CHAIN_MONOMER_NAMES = {'dna': 'dna base',
'protein': 'amino acid',
                       'unknown': 'small molecule',
'water': 'water',
'solvent': 'solvent',
'ion': 'ion'}
# This is a very big dict, so we load it as a compressed database
_bondfilename = os.path.join(PACKAGEPATH, '_static_data/residue_bonds')
RESIDUE_BONDS = utils.CompressedJsonDbm(_bondfilename, 'r', dbm=utils.ReadOnlyDumb)
""" Database of bonds, as determined by the PDB chemical component dictionary.
Accessed similarly to a dict, but not stored in memory."""
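# Usage sketch (the residue key is illustrative): RESIDUE_BONDS['ALA'] would
# look up the bond topology for alanine lazily from the compressed dbm file.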
AMINO_NAMES = {
"ALA": "Alanine",
"ARG": "Arginine",
"ASN": "Asparagine",
"ASP": "Aspartic acid",
"ASX": "ASP/ASN ambiguous",
"CYS": "Cysteine",
"GLN": "Glutamine",
"GLU": "Glutamic acid",
"GLX": "GLU/GLN ambiguous",
"GLY": "Glycine",
"HIS": "Histidine",
"HIE": "Histidine epsilon tautomer",
"HID": "Histidine delta tautomer",
"HIP": "Histidine ion",
"ILE": "Isoleucine",
"LEU": "Leucine",
"LYS": "Lysine",
"MET": "Methionine",
"PHE": "Phenylalanine",
"PRO": "Proline",
"SER": "Serine",
"THR": "Threonine",
"TRP": "Tryptophan",
"TYR": "Tyrosine",
"VAL": "Valine",
"UNK": "Undetermined"}
""" Maps 3-letter codes to common names """
NUCLEIC_NAMES = {
'A': 'Adenine',
'C': 'Cytosine',
'G': 'Guanine',
'I': 'Inosine', # actually not sure about this one
'T': 'Thymine',
'U': 'Uracil'}
""" Maps 1-letter nucleic base codes to common names """
IONS = {'NA': 'Na+',
'K': 'K+',
'MG': 'Mg+2',
'CA': 'Ca+2',
'F': 'F-',
'Cl': 'Cl-',
'Br': 'Br-',
'I': 'I-'}
""" Maps PDB residue names for ions to common names"""
RESTYPES = dict(
protein=set(AMINO_NAMES),
water={'HOH', 'H2O'},
solvent=set(),
dna=set(DBASES),
rna=set(RBASES),
unknown=set(),
ions=set(IONS))
RESIDUE_TYPES = {None: 'placeholder'}
for typename, namelist in RESTYPES.iteritems():
for resname in namelist: RESIDUE_TYPES[resname] = typename
RESIDUE_DESCRIPTIONS = dict(AMINO_NAMES)
for base, name in AMINO_NAMES.iteritems():
RESIDUE_DESCRIPTIONS['N' + name] = name + ' (N-terminal)'
RESIDUE_DESCRIPTIONS['C' + name] = name + ' (C-terminal)'
RESIDUE_DESCRIPTIONS.update(NUCLEIC_NAMES)
for base, name in NUCLEIC_NAMES.iteritems():
RESIDUE_DESCRIPTIONS['D' + base] = name + " (DNA)"
RESIDUE_DESCRIPTIONS['D' + base + '5'] = name + " (DNA, 5'-end)"
RESIDUE_DESCRIPTIONS['D' + base + '3'] = name + " (DNA, 3'-end)"
RESIDUE_DESCRIPTIONS['R' + base] = name + " (RNA)"
RESIDUE_DESCRIPTIONS['R' + base + '5'] = name + " (RNA, 5'-end)"
RESIDUE_DESCRIPTIONS['R' + base + '3'] = name + " (RNA, 3'-end)"
DEFAULT_FORCE_TOLERANCE = (0.0001 * u.hartree / u.bohr).defunits() # taken from GAMESS OPTTOL keyword
COLOR_LIST = ['lightgreen', 'lightblue', 'lightgrey',
'yellow', 'orange', 'purple', 'IndianRed',
'PaleTurquoise', 'OldLace', 'Thistle', 'pink']
class CyclicList(list):
def __getitem__(self, item):
return super(CyclicList, self).__getitem__(item % len(self))
try:
import webcolors
except ImportError:
color_rotation = CyclicList(COLOR_LIST)
else:
color_rotation = CyclicList(map(webcolors.name_to_hex, COLOR_LIST))
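# Example: indices wrap modulo the list length, so with the 11 colours above,
# color_rotation[11] returns the same colour as color_rotation[0].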
def print_environment():
"""For reporting bugs - spits out the user's environment"""
import sys
version = {}
    for pkg in 'moldesign IPython ipywidgets jupyter matplotlib numpy docker pyccc distutils ' \
'nbmolviz jupyter_client jupyter_core pint Bio openbabel simtk pyscf pip setuptools'\
.split():
try:
module = __import__(pkg)
except ImportError as e:
version[pkg] = str(e)
else:
try:
version[pkg] = module.__version__
except AttributeError as e:
version[pkg] = str(e)
env = {'platform': sys.platform,
'version': sys.version,
'prefix': sys.prefix}
try:
import platform
env['machine'] = platform.machine()
env['linux'] = platform.linux_distribution()
env['mac'] = platform.mac_ver()
env['windows'] = platform.win32_ver()
env['impl'] = platform.python_implementation()
env['arch'] = platform.architecture()
env['system'] = platform.system()
env['python_build'] = platform.python_build()
env['platform_version'] = platform.version()
except Exception as e:
env['platform_exception'] = str(e)
print json.dumps({'env': env,
'versions': version})
|
tkzeng/molecular-design-toolkit
|
moldesign/data.py
|
Python
|
apache-2.0
| 8,922
|
[
"GAMESS",
"PySCF"
] |
6f47b221cf0c3d74e8623b69ce9af64c91055e8142decc112a938f66be1f1e74
|
#!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import json
import os
import re
import sys
import yaml
from google.cloud import storage
from buildtool import check_subprocess, run_subprocess
"""Provides a utility to clean up the Google Cloud VM images produced during a
build of Spinnaker.
Only a small subset of VM images produced during a Spinnaker build are
referenced (by component version) by a Spinnaker bill of material (BOM). The
rest of the unreferenced VM images need to be deleted periodically. This script will
be run periodically to reap old, unreferenced images.
"""
RELEASED_VERSION_MATCHER = re.compile(r'^[0-9]+\.[0-9]+\.[0-9]+\.yml$')
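# e.g. '1.8.0.yml' matches (a released BOM), while a nightly name such as
# 'master-20180402.yml' does not.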
SERVICES = [
'clouddriver',
'deck',
'echo',
'front50',
'gate',
'igor',
'orca',
'rosco',
'fiat'
]
PUBLISHED_TAG_KEY = 'published'
def __partition_boms(gcs_client, bucket_name):
def __bom_to_tag(bom_blob):
name = os.path.basename(bom_blob.name)
return RELEASED_VERSION_MATCHER.match(name)
def __bom_to_version(bom_blob):
return os.path.basename(bom_blob.name).replace('.yml', '')
bucket = gcs_client.get_bucket(bucket_name)
all_bom_blobs = [b for b in bucket.list_blobs(prefix='bom') if b.name.endswith('.yml')]
bom_contents_by_name = {__bom_to_version(b): b.download_as_string() for b in all_bom_blobs}
versions_to_tag = [__bom_to_version(bom) for bom in all_bom_blobs if __bom_to_tag(bom)]
possible_versions_to_delete = [__bom_to_version(bom) for bom in all_bom_blobs if not __bom_to_tag(bom)]
return (versions_to_tag, possible_versions_to_delete, bom_contents_by_name)
def __image_age_days(image_json):
# HACK: Cut off the timezone because strptime() can't deal with timezones.
    # Timezone offset is 6 characters of the form: +00:00 or -00:00.
timestamp = image_json['creationTimestamp'][:len(image_json['creationTimestamp'])-6]
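    # e.g. '2018-04-02T12:34:56.789-07:00' -> '2018-04-02T12:34:56.789'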
time_created = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f')
now = datetime.datetime.utcnow()
return (now - time_created).days
def __tag_images(versions_to_tag, project, account, project_images, bom_contents_by_name):
images_to_tag = set([])
for bom_version in versions_to_tag:
to_tag = [i for i in __derive_images_from_bom(bom_version, bom_contents_by_name) if i in project_images]
images_to_tag.update(to_tag)
for image in images_to_tag:
return_code, stdout = run_subprocess(
'gcloud compute images describe'
' --project={project} --account={account} --format=json {image}'
.format(project=project, account=account, image=image), echo=False)
        # Adding labels is idempotent; adding the same label again doesn't break anything.
if not return_code:
payload_str = stdout.strip()
timestamp = json.loads(payload_str)['creationTimestamp']
timestamp = timestamp[:timestamp.index('T')]
check_subprocess(
'gcloud compute images add-labels --project={project} --account={account} --labels={key}={timestamp} {image}'
.format(project=project, account=account, key=PUBLISHED_TAG_KEY, timestamp=timestamp, image=image))
def __write_image_delete_script(possible_versions_to_delete, days_before, project,
account, project_images, bom_contents_by_name):
images_to_delete = set([])
    print('Calculating images for {} versions to delete.'.format(len(possible_versions_to_delete)))
for bom_version in possible_versions_to_delete:
deletable = [i for i in __derive_images_from_bom(bom_version, bom_contents_by_name) if i in project_images]
images_to_delete.update(deletable)
delete_script_lines = []
for image in images_to_delete:
return_code, stdout = run_subprocess(
'gcloud compute images describe'
' --project={project} --account={account} --format=json {image}'
.format(project=project, account=account, image=image), echo=False)
json_str = ''
if return_code:
# Some BOMs may refer to service versions without HA images.
print('Lookup for image {image} in project {project} failed, ignoring'.format(image=image, project=project))
continue
else:
json_str = stdout.strip()
payload = json.loads(json_str)
if __image_age_days(payload) > days_before:
labels = payload.get('labels', None)
if not labels or not PUBLISHED_TAG_KEY in labels:
line = 'gcloud compute images delete --project={project} --account={account} {image} -q'.format(project=project, account=account, image=image)
delete_script_lines.append(line)
delete_script = '\n'.join(delete_script_lines)
timestamp = '{:%Y%m%d%H%M%S}'.format(datetime.datetime.utcnow())
script_name = 'delete-images-{}'.format(timestamp)
with open(script_name, 'w') as script:
script.write(delete_script)
    print('Wrote image janitor script to {}'.format(script_name))
def __derive_images_from_bom(bom_version, contents_by_name):
bom_content_str = contents_by_name[bom_version]
bom_dict = yaml.safe_load(bom_content_str)
service_entries = bom_dict['services']
return [__format_image_name(s, service_entries) for s in SERVICES]
def __format_image_name(service_name, service_entries):
service_version = service_entries[service_name]['version']
dash_version = service_version.replace('.', '-')
return 'spinnaker-{service}-{version}'.format(service=service_name,
version=dash_version)
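# e.g. service 'clouddriver' at version '1.2.3' -> 'spinnaker-clouddriver-1-2-3'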
def __delete_unused_bom_images(options):
client = None
if options.json_path:
client = storage.Client.from_service_account_json(options.json_path)
else:
client = storage.Client()
versions_to_tag, possible_versions_to_delete, bom_contents_by_name = __partition_boms(client, options.bom_bucket_name)
if options.additional_boms_to_tag:
additional_boms_to_tag = options.additional_boms_to_tag.split(',')
print('Adding additional BOM versions to tag: {}'.format(additional_boms_to_tag))
versions_to_tag.extend(additional_boms_to_tag)
print('Tagging versions: {}'.format(versions_to_tag))
print('Deleting versions: {}'.format(possible_versions_to_delete))
project = options.project
service_account = options.service_account
image_list_str = check_subprocess('gcloud compute images list --format=json --project={project} --account={account}'
.format(project=project, account=service_account), echo=False)
image_list = json.loads(image_list_str)
project_images = set([image['name'] for image in image_list])
__tag_images(versions_to_tag, project, service_account, project_images,
bom_contents_by_name)
__write_image_delete_script(possible_versions_to_delete, options.days_before, project,
service_account, project_images,
bom_contents_by_name)
def init_argument_parser(parser):
parser.add_argument('--additional_boms_to_tag', default='',
                        help='Comma-delimited list of additional BOM versions to tag '
                             'to avoid deletion.')
parser.add_argument('--bom_bucket_name', default='halconfig',
help='The name of the Halyard bucket storing the BOMs.')
parser.add_argument('--days_before', default=14,
help='Max age in days of nightly build BOMs to save.')
parser.add_argument('--json_path', default='',
help='Path to the service account credentials with access to the BOM bucket.')
parser.add_argument('--project', default='', required=True,
help='GCP project the HA images are stored in.')
parser.add_argument('--service_account', default='', required=True,
help='Name of the service account to manipulate images with.')
def main():
parser = argparse.ArgumentParser()
init_argument_parser(parser)
options = parser.parse_args()
__delete_unused_bom_images(options)
if __name__ == '__main__':
sys.exit(main())
|
skim1420/spinnaker
|
google/release/ha_image_janitor.py
|
Python
|
apache-2.0
| 8,514
|
[
"ORCA"
] |
58229bb5300733761a097fe7e054b5e545c0dc2bf96eb12206178d219753029d
|
#!/usr/bin/env python
import pysam
import sys
if __name__ == "__main__":
fmsum = 0
rvsum = 0
ex = sys.argv[0].split("/")[-1]
if len(sys.argv) < 2:
sys.stderr.write("Usage: python %s <bampath> <outfile> "
"[omit outfile to write to stdout]\n" % ex)
sys.exit(1)
handle = open(sys.argv[2], "w") if len(sys.argv) >= 3 else sys.stdout
for read in pysam.AlignmentFile(sys.argv[1]):
# 2432 = (BAM_FSECONDARY | BAM_FSUPPLEMENTARY | BAM_FREAD2)
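        # (256 | 2048 | 128) == 2432, i.e. skip secondary and supplementary
        # alignments as well as read 2, so each template is counted once.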
if read.flag & 2432:
continue
fmsum += read.opt("FM")
rvsum += read.opt("RV") if read.has_tag("RV") else 0
handle.write("%s-FM:%i;RV:%i.\n" % (sys.argv[1], fmsum, rvsum))
sys.exit(0)
|
ARUP-NGS/BMFtools
|
sample_test/getsums.py
|
Python
|
mit
| 737
|
[
"pysam"
] |
1ba8f7b470ae84271fa367b9e7f50dcbc94c796442e9fc7f8f45ec687eccb603
|
# -*- coding: utf-8 -*-
"""
Created on 26 Sep 2012
@author: Éric Piel
Copyright © 2012-2021 Éric Piel, Philip Winkler, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
from builtins import str
from past.builtins import basestring, long
from collections import OrderedDict
import collections
import functools
import gc
import locale
import logging
import numpy
from odemis import model, util
from odemis.acq.stream import MeanSpectrumProjection, FastEMOverviewStream
from odemis.gui import FG_COLOUR_DIS, FG_COLOUR_WARNING, FG_COLOUR_ERROR, \
CONTROL_COMBO, CONTROL_FLT
from odemis.gui.comp.overlay.world import RepetitionSelectOverlay
from odemis.gui.comp.stream import StreamPanel, FastEMProjectPanel, FastEMROAPanel, FastEMCalibrationPanel, \
EVT_STREAM_VISIBLE, EVT_STREAM_PEAK, OPT_BTN_REMOVE, OPT_BTN_SHOW, OPT_BTN_UPDATE, OPT_BTN_TINT, \
OPT_NAME_EDIT, OPT_BTN_PEAK, OPT_FIT_RGB, OPT_NO_COLORMAPS
from odemis.gui.conf import data
from odemis.gui.conf.data import get_local_vas, get_hw_config
from odemis.gui.conf.util import create_setting_entry, create_axis_entry, SettingEntry
from odemis.gui.model import dye, TOOL_SPOT, TOOL_NONE
from odemis.gui.util import call_in_wx_main, wxlimit_invocation
from odemis.util import fluo
from odemis.util.conversion import wave2rgb
from odemis.util.filename import make_unique_name
from odemis.util.fluo import to_readable_band, get_one_center
from odemis.util.units import readable_str
import re
import time
import wx
from wx.lib.pubsub import pub
import odemis.acq.fastem
import odemis.acq.stream as acqstream
import odemis.gui.model as guimodel
# There are two kinds of controllers:
# * Stream controller: links 1 stream <-> stream panel (cont/stream/StreamPanel)
# * StreamBar controller: links .streams VA <-> stream bar (cont/stream/StreamBar)
# The StreamBar controller is also in charge of the scheduling of the streams.
# Stream scheduling policies: decide which of the streams with .should_update set also get .is_active set
SCHED_LAST_ONE = 1 # Last stream which got added to the should_update set
SCHED_ALL = 2 # All the streams which are in the should_update stream
# Note: it seems users don't like ideas like round-robin, where the hardware
# keeps turning on and off (and with fluorescence, fine control must be done to
# avoid bleaching).
# TODO: SCHED_ALL_INDIE -> Schedule at the same time all the streams which
# are independent (no emitter from a stream will affect any detector of another
# stream).
PEAK_METHOD_TO_STATE = {None: None, "gaussian": 0, "lorentzian": 1}
class StreamController(object):
""" Manage a stream and its accompanying stream panel """
def __init__(self, stream_bar, stream, tab_data_model, show_panel=True, view=None,
sb_ctrl=None):
"""
view (MicroscopeView or None): Link stream to a view. If view is None, the stream
will be linked to the focused view. Passing a view to the controller ensures
that the visibility button functions correctly when multiple views are present.
sb_ctrl (StreamBarController or None): the StreamBarController which (typically)
created this StreamController. Only needed for ROA repetition display.
"""
self.stream = stream
self.stream_bar = stream_bar
self.view = view
self._sb_ctrl = sb_ctrl
self._stream_config = data.get_stream_settings_config().get(type(stream), {})
options = (OPT_BTN_REMOVE | OPT_BTN_SHOW | OPT_BTN_UPDATE)
# Add tint/colormap option if there is a tint VA and adjust based on the stream type
if hasattr(stream, "tint"):
options |= OPT_BTN_TINT
if isinstance(stream, acqstream.RGBStream):
options |= OPT_NO_COLORMAPS
# (Temporal)SpectrumStreams *with spectrum data* accept the FIT_TO_RGB option
if isinstance(stream, acqstream.SpectrumStream) and stream.raw[0].shape[0] > 1:
options |= OPT_FIT_RGB
# Allow changing the name of dyes (aka FluoStreams)
if isinstance(stream, acqstream.FluoStream):
options |= OPT_NAME_EDIT
# Special display for spectrum (aka SpectrumStream)
if isinstance(stream, acqstream.SpectrumStream) and hasattr(stream, "peak_method"):
options |= OPT_BTN_PEAK
self.stream_panel = StreamPanel(stream_bar, stream, options)
# Detect when the panel is destroyed (but _not_ any of the children)
# Make sure to Unbind ALL event bound to the stream panel!!
self.stream_panel.Bind(wx.EVT_WINDOW_DESTROY, self._on_stream_panel_destroy,
source=self.stream_panel)
self.tab_data_model = tab_data_model
# To update the local resolution without hardware feedback
self._resva = None
self._resmx = None
self._binva = None
# Peak excitation/emission wavelength of the selected dye, to be used for peak text and
# wavelength colour
self._dye_xwl = None
self._dye_ewl = None
self._dye_prev_ewl_center = None # ewl when tint was last changed
self._btn_excitation = None
self._btn_emission = None
self._lbl_exc_peak = None
self._lbl_em_peak = None
self.entries = [] # SettingEntry
self._disabled_entries = set() # set of SettingEntry objects
# Metadata display in analysis tab (static streams)
if isinstance(self.stream, acqstream.StaticStream):
self._display_metadata()
# Add current power VA setting entry (instead of using emtVas)
if hasattr(stream, "power"):
va = self.stream.power
name = "power"
if self.stream.emitter:
hw_settings = self.tab_data_model.main.hw_settings_config
emitter_conf = get_hw_config(self.stream.emitter, hw_settings)
else:
emitter_conf = {}
conf = emitter_conf.get(name)
if conf is not None:
logging.debug("%s emitter configuration found for %s", name,
self.stream.emitter.role)
self.add_setting_entry(name, va, self.stream.emitter, conf)
# Add local hardware settings to the stream panel
self._add_hw_setting_controls()
if hasattr(stream, "emtResolution") or hasattr(stream, "detResolution"):
self._link_resolution()
# TODO: Add also a widget to change the "cropping" by selecting a ratio
# (of the area of the detector), and update the ROI/resolution based on this.
# In that case, we might be able to drop resolution from the local VA
# completely, and only display as an information based on binning and ROI.
self._add_stream_setting_controls()
if len(self.entries) > 0: # TODO: only do so, if some other controls are displayed after
self.stream_panel.add_divider()
num_settings_entries = len(self.entries)
if hasattr(self.stream, "axis_vas"):
self._add_axis_controls()
# Check if dye control is needed
if hasattr(stream, "excitation") and hasattr(stream, "emission"):
self._add_dye_ctrl()
elif hasattr(stream, "excitation"): # only excitation
self._add_excitation_ctrl()
elif hasattr(stream, "emission"): # only emission
self._add_emission_ctrl()
if len(self.entries) > num_settings_entries: # TODO: also only if other controls after
self.stream_panel.add_divider()
# Add metadata button to show dialog with full list of metadata
if isinstance(self.stream, acqstream.StaticStream):
metadata_btn = self.stream_panel.add_metadata_button()
metadata_btn.Bind(wx.EVT_BUTTON, self._on_metadata_btn)
# TODO: Change the way in which BC controls are hidden (Use config in data.py)
if hasattr(stream, "auto_bc") and hasattr(stream, "intensityRange"):
self._add_brightnesscontrast_ctrls()
self._add_outliers_ctrls()
if hasattr(stream, "spectrumBandwidth"):
self._add_wl_ctrls()
self.mean_spec_proj = MeanSpectrumProjection(self.stream)
self.mean_spec_proj.image.subscribe(self._on_new_spec_data, init=True)
if hasattr(self.stream, "selectionWidth"):
self._add_selwidth_ctrl()
if hasattr(stream, "zIndex") and hasattr(self.tab_data_model, "zPos"):
self.stream.zIndex.subscribe(self._on_z_index)
self.tab_data_model.zPos.subscribe(self._on_z_pos, init=True)
if hasattr(stream, "repetition"):
self._add_repetition_ctrl()
if tab_data_model.main.role == "mbsem" and isinstance(stream, acqstream.SEMStream): # don't show for CCD stream
# It's a FastEM
self._add_fastem_ctrls()
# Set the visibility button on the stream panel
if view:
vis = stream in view.stream_tree
else:
vis = stream in tab_data_model.focussedView.value.stream_tree
self.stream_panel.set_visible(vis)
self.stream_panel.Bind(EVT_STREAM_VISIBLE, self._on_stream_visible)
if isinstance(stream, acqstream.SpectrumStream) and hasattr(stream, "peak_method"):
# Set the peak button on the stream panel
self.stream_panel.set_peak(PEAK_METHOD_TO_STATE[stream.peak_method.value])
self.stream_panel.Bind(EVT_STREAM_PEAK, self._on_stream_peak)
stream_bar.add_stream_panel(self.stream_panel, show_panel)
def _on_stream_panel_destroy(self, _):
""" Remove all references to setting entries and the possible VAs they might contain
"""
logging.debug("Stream panel %s destroyed", self.stream.name.value)
        # Destroy references to this controller in event handlers
        # (More references are present, see sys.getrefcount)
self.stream_panel.Unbind(wx.EVT_WINDOW_DESTROY)
self.stream_panel.header_change_callback = None
self.stream_panel.Unbind(EVT_STREAM_VISIBLE)
self.stream_panel.Unbind(EVT_STREAM_PEAK)
self._unlink_resolution()
self._disconnectRepOverlay()
if hasattr(self.stream, "repetition"):
self.stream.repetition.unsubscribe(self._onStreamRep)
# Unsubscribe from all the VAs
# TODO: it seems that in some cases we still receive a call after destruction
for entry in self.entries:
entry.disconnect()
self.entries = []
gc.collect()
def _display_metadata(self):
"""
Display metadata for integration time, ebeam voltage, probe current and
emission/excitation wavelength
"""
mds = self.stream.getRawMetadata()
if not mds:
logging.warning("No raw data in stream")
return
md = mds[0]
# Use "integration time" instead of "exposure time" since, in some cases,
# the dwell time is stored in MD_EXP_TIME.
if model.MD_EXP_TIME in md:
self.add_metadata("Integration time", md[model.MD_EXP_TIME], 's')
elif model.MD_DWELL_TIME in md:
self.add_metadata(model.MD_DWELL_TIME, md[model.MD_DWELL_TIME], 's')
if model.MD_EBEAM_VOLTAGE in md:
self.add_metadata("Acceleration voltage", md[model.MD_EBEAM_VOLTAGE], 'V')
if model.MD_EBEAM_CURRENT in md:
self.add_metadata("Emission current", md[model.MD_EBEAM_CURRENT], 'A')
def pause(self):
""" Pause (freeze) SettingEntry related control updates """
for entry in self.entries:
entry.pause()
if entry.value_ctrl and entry.value_ctrl.IsEnabled():
entry.value_ctrl.Enable(False)
self._disabled_entries.add(entry)
self.stream_panel.enable(False)
def resume(self):
""" Resume SettingEntry related control updates """
for entry in self.entries:
entry.resume()
if entry in self._disabled_entries:
entry.value_ctrl.Enable(True)
self._disabled_entries.remove(entry)
self.stream_panel.enable(True)
def pauseStream(self):
""" Pause (deactivate and stop updating) the stream """
if self.stream.should_update.value:
self.stream.is_active.value = False
self.stream.should_update.value = False
def enable(self, enabled):
""" Enable or disable all SettingEntries
"""
# FIXME: There is a possible problem that, for now, seems to work itself out: When related
# controls dictate between themselves which ones are enabled (i.e. a toggle button,
# dictating which slider is activated, as with auto brightness and contrast), enabling
# all of them could/would be wrong.
#
        # As it is now, when everything is re-enabled, the current position of the
        # toggle button immediately causes the corresponding slider to be disabled again.
for entry in self.entries:
if entry.value_ctrl:
entry.value_ctrl.Enable(enabled)
def _add_hw_setting_controls(self):
""" Add local version of linked hardware setting VAs """
# Get the emitter and detector configurations if they exist
hw_settings = self.tab_data_model.main.hw_settings_config
if self.stream.emitter:
emitter_conf = get_hw_config(self.stream.emitter, hw_settings)
else:
emitter_conf = {}
if self.stream.detector:
detector_conf = get_hw_config(self.stream.detector, hw_settings)
else:
detector_conf = {}
# TODO "integrationTime" not part of detector VAs now, as stream VA
# -> should be handled as detector VA as it replaces exposureTime VA
# Process the hardware VAs first (emitter and detector hardware VAs are combined into one
# attribute called 'hw_vas'
vas_names = util.sorted_according_to(list(self.stream.hw_vas.keys()), list(emitter_conf.keys()))
for name in vas_names:
va = self.stream.hw_vas[name]
conf = emitter_conf.get(name, detector_conf.get(name, None))
if conf is not None:
logging.debug("%s hardware configuration found", name)
self.add_setting_entry(name, va, self.stream.emitter, conf)
# Process the emitter VAs first
vas_names = util.sorted_according_to(list(self.stream.emt_vas.keys()), list(emitter_conf.keys()))
for name in vas_names:
va = self.stream.emt_vas[name]
conf = emitter_conf.get(name)
if conf is not None:
logging.debug("%s emitter configuration found for %s", name,
self.stream.emitter.role)
self.add_setting_entry(name, va, self.stream.emitter, conf)
# Then process the detector
vas_names = util.sorted_according_to(list(self.stream.det_vas.keys()), list(detector_conf.keys()))
for name in vas_names:
va = self.stream.det_vas[name]
conf = detector_conf.get(name)
if conf is not None:
logging.debug("%s detector configuration found for %s", name,
self.stream.detector.role)
self.add_setting_entry(name, va, self.stream.detector, conf)
def _add_stream_setting_controls(self):
""" Add control for the VAs of the stream
Note: only the VAs which are defined in the stream_config are shown.
"""
for vaname, conf in self._stream_config.items():
try:
va = getattr(self.stream, vaname)
except AttributeError:
logging.debug("Skipping non existent VA %s on %s", vaname, self.stream)
continue
conf = self._stream_config.get(vaname)
self.add_setting_entry(vaname, va, hw_comp=None, conf=conf)
def _add_axis_controls(self):
"""
Add controls for the axes that are connected to the stream
"""
# Add Axes (in same order as config)
axes_names = util.sorted_according_to(list(self.stream.axis_vas.keys()), list(self._stream_config.keys()))
for axisname in axes_names:
conf = self._stream_config.get(axisname)
self.add_setting_entry(axisname, self.stream.axis_vas[axisname], None, conf)
def add_setting_entry(self, name, va, hw_comp, conf=None):
""" Add a name/value pair to the settings panel.
:param name: (string): name of the value
:param va: (VigilantAttribute)
:param hw_comp: (Component): the component that contains this VigilantAttribute
:param conf: ({}): Configuration items that may override default settings
:return SettingEntry or None: the entry created, or None, if no entry was
created (eg, because the conf indicates CONTROL_NONE).
"""
se = create_setting_entry(self.stream_panel, name, va, hw_comp, conf)
if se is not None:
self.entries.append(se)
return se
def add_axis_entry(self, name, comp, conf=None):
""" Add a widget to the setting panel to control an axis
:param name: (string): name of the axis
:param comp: (Component): the component that contains this axis
:param conf: ({}): Configuration items that may override default settings
"""
ae = create_axis_entry(self.stream_panel, name, comp, conf)
if ae is not None:
self.entries.append(ae)
return ae
def add_metadata(self, key, value, unit=None):
""" Adds an entry representing specific metadata
According to the metadata key, the right representation is used for the value.
:param key: (model.MD_*) the metadata key
:param value: (depends on the metadata) the value to display
:param unit: (None or string) unit of the values. If necessary a SI prefix
will be used to make the value more readable, unless None is given.
"""
# By default the key is a nice user-readable string
label = str(key)
# Convert value to a nice string according to the metadata type
try:
if key == model.MD_ACQ_DATE:
# convert to a date using the user's preferences
nice_str = time.strftime("%c", time.localtime(value))
# In Python 2, we still need to convert it to unicode
if isinstance(nice_str, bytes):
nice_str = nice_str.decode(locale.getpreferredencoding())
else:
# Still try to beautify a bit if it's a number
if (
isinstance(value, (int, long, float)) or
(
isinstance(value, collections.Iterable) and
len(value) > 0 and
isinstance(value[0], (int, long, float))
)
):
nice_str = readable_str(value, unit, 3)
else:
nice_str = str(value)
self.stream_panel.add_readonly_field(label, nice_str)
except Exception:
logging.exception("Trying to convert metadata %s", key)
def _on_stream_visible(self, evt):
""" Show or hide a stream in the focussed view if the visibility button is clicked """
if self.view:
view = self.view
else:
view = self.tab_data_model.focussedView.value
if not view:
return
if evt.visible:
logging.debug("Showing stream '%s'", self.stream.name.value)
view.addStream(self.stream)
else:
logging.debug("Hiding stream '%s'", self.stream.name.value)
view.removeStream(self.stream)
def _on_stream_peak(self, evt):
""" Show or hide a stream in the focussed view if the peak button is clicked """
for m, s in PEAK_METHOD_TO_STATE.items():
if evt.state == s:
self.stream.peak_method.value = m
logging.debug("peak method set to %s", m)
break
else:
logging.error("No peak method corresponding to state %s", evt.state)
def _on_z_index(self, zIndex):
self.tab_data_model.zPos.unsubscribe(self._on_z_pos)
metadata = self.stream.getRawMetadata()[0] # take the first only
zcentre = metadata[model.MD_POS][2]
zstep = metadata[model.MD_PIXEL_SIZE][2]
# The number of zIndexes is zIndex.range[1] + 1 (as it starts at 0).
        # zstart is the *center* position of the first slice, and the total
        # span is (len(zIndexes) - 1) * zstep == zIndex.range[1] * zstep.
zstart = zcentre - self.stream.zIndex.range[1] * zstep / 2
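        # e.g. zcentre=0, zstep=1e-6 and zIndex.range == (0, 4) (5 slices):
        # zstart = 0 - 4 * 1e-6 / 2 = -2e-6, so slice centres run -2e-6..+2e-6.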
self.tab_data_model.zPos.value = self.tab_data_model.zPos.clip(zstart + zstep * zIndex)
self.tab_data_model.zPos.subscribe(self._on_z_pos)
def _on_z_pos(self, zPos):
# Given an absolute physical position in z pos, set the z index for a stream
# based on physical parameters
self.stream.zIndex.unsubscribe(self._on_z_index)
metadata = self.stream.getRawMetadata()[0] # take the first only
zcentre = metadata[model.MD_POS][2]
zstep = metadata[model.MD_PIXEL_SIZE][2]
zstart = zcentre - self.stream.zIndex.range[1] * zstep / 2
val = int(round((zPos - zstart) / zstep))
self.stream.zIndex.value = self.stream.zIndex.clip(val)
self.stream.zIndex.subscribe(self._on_z_index)
def _on_new_dye_name(self, dye_name):
""" Assign excitation and emission wavelengths if the given name matches a known dye """
# update the name of the stream
self.stream.name.value = dye_name
# update the excitation and emission wavelength
if dye_name in dye.DyeDatabase:
xwl, ewl = dye.DyeDatabase[dye_name]
self._dye_xwl = xwl
self._dye_ewl = ewl
self.stream.excitation.value = fluo.find_best_band_for_dye(
xwl, self.stream.excitation.choices)
self.stream.emission.value = fluo.find_best_band_for_dye(
ewl, self.stream.emission.choices)
# use peak values to pick the best tint and set the wavelength colour
xcol = wave2rgb(xwl)
self._btn_excitation.set_colour(xcol)
ecol = wave2rgb(ewl)
self._btn_emission.set_colour(ecol)
self.stream.tint.value = ecol
else:
self._dye_xwl = None
self._dye_ewl = None
# Either update the peak info, or clean up if nothing to display
self.update_peak_label_fit(self._lbl_exc_peak, self._btn_excitation,
self._dye_xwl, self.stream.excitation.value)
self.update_peak_label_fit(self._lbl_em_peak, self._btn_emission,
self._dye_ewl, self.stream.emission.value)
def _on_metadata_btn(self, evt):
text = u""
raw = [r for r in self.stream.raw if r is not None]
text += u"======================================\nGeneral\n"
text += u"======================================\n"
for i, r in enumerate(raw):
if len(raw) > 1:
text += u"========= Array %d =========\n" % (i + 1,)
shape = r.shape
dtype = r.dtype
md = r.metadata
text += u"Shape: %s\n" % (u" x ".join(str(s) for s in shape),)
text += u"Data type: %s\n" % (dtype,)
for key in sorted(md):
if key == model.MD_EXTRA_SETTINGS:
# show extra settings last
continue
v = md[key]
if key == model.MD_ACQ_DATE: # display date in readable format
nice_str = time.strftime("%c", time.localtime(v))
# In Python 2, we still need to convert it to unicode
if isinstance(nice_str, bytes):
nice_str = nice_str.decode(locale.getpreferredencoding())
text += u"%s: %s\n" % (key, nice_str)
else:
if isinstance(v, numpy.ndarray):
                        # Avoid ellipses (eg, [1, ..., 100]) as we want _all_
# the data (unless it'd get really crazy).
# TODO: from numpy v1.14, the "threshold" argument can
# be directly used in array2string().
numpy.set_printoptions(threshold=2500)
v = numpy.array2string(v, max_line_width=100, separator=u", ")
numpy.set_printoptions(threshold=1000)
elif isinstance(v, list) and len(v) > 2500:
v = u"[%s … %s]" % (u", ".join(str(a) for a in v[:20]), u", ".join(str(a) for a in v[-20:]))
text += u"%s: %s\n" % (key, v)
# only display extra settings once
if model.MD_EXTRA_SETTINGS in raw[0].metadata:
text += u"\n======================================\nHardware Settings\n"
text += u"======================================\n"
for comp, vas in md[model.MD_EXTRA_SETTINGS].items():
try:
if vas:
text += u"Component %s:\n" % comp
for name, (value, unit) in vas.items():
unit = unit or "" # don't display 'None'
unit = unit if value is not None else "" # don't display unit if data is None (None Hz doesn't make sense)
if isinstance(value, dict):
if value:
text += u"\t%s:\n" % name
for key, val in value.items():
text += u"\t\t%s: %s %s\n" % (key, val, unit)
else:
# still display the VA, might be interesting (e.g. that no axis was referenced)
text += u"\t%s: {}\n" % name
else:
text += u"\t%s: %s %s\n" % (name, value, unit)
except Exception as ex:
logging.warning("Couldn't display metadata for component %s: %s" % (comp, ex))
continue
# Note: we show empty window even if no data present, to let the user know
# that there is no data, but the button worked fine.
md_frame = self.stream_panel.create_text_frame(u"Metadata of %s" % self.stream.name.value, text)
md_frame.ShowModal()
md_frame.Destroy()
# Panel state methods
def to_locked_mode(self):
self.stream_panel.to_locked_mode()
def to_static_mode(self):
self.stream_panel.to_static_mode()
# END Panel state methods
def _link_resolution(self):
"""
Ensure that the resolution setting is recomputed when the binning/scale
changes.
"""
# shape and resolution.range[1] are almost always the same, but in some
# cases like spectrometer, only the shape contains the info needed.
if hasattr(self.stream, "emtResolution"):
self._resva = self.stream.emtResolution
self._resmx = self.stream.emitter.shape[:2]
prefix = "emt"
elif hasattr(self.stream, "detResolution"):
self._resva = self.stream.detResolution
self._resmx = self.stream.detector.shape[:2]
prefix = "det"
else:
raise LookupError("No resolution VA found")
        if self._resva.readonly:
            logging.info("Will not update resolution, as it is readonly")
            return
# Find the binning/scale VA
for n in ("Binning", "Scale"):
fn = prefix + n
if hasattr(self.stream, fn):
self._binva = getattr(self.stream, fn)
break
else:
logging.warning("Stream has resolution VA but no binning/scale, "
"so it will not be updated.")
return
self._binva.subscribe(self._update_resolution)
self._resva.subscribe(self._on_resolution)
def _unlink_resolution(self):
if self._binva:
self._binva.unsubscribe(self._update_resolution)
if self._resva:
            self._resva.unsubscribe(self._on_resolution)
def _update_resolution(self, scale, crop=1.0):
"""
scale (2 ints or floats): new divisor of the resolution
crop (0 < float <= 1): ratio of the FoV used
"""
# if the stream is not playing, the hardware should take care of it
if self.stream.is_active.value:
return
newres = (int((self._resmx[0] * crop) // scale[0]),
int((self._resmx[1] * crop) // scale[1]))
newres = self._resva.clip(newres)
logging.debug("Updated resolution to %s", newres)
self._resva.value = newres
def _on_resolution(self, res, crop=1.0):
# if the stream is not playing, the hardware should take care of it
if self.stream.is_active.value:
return
scale = self._binva.value
maxres = (int((self._resmx[0] * crop) // scale[0]),
int((self._resmx[1] * crop) // scale[1]))
maxres = self._resva.clip(maxres)
newres = (min(res[0], maxres[0]), min(res[1], maxres[1]))
if newres != res:
logging.debug("Limiting resolution to %s", newres)
self._resva.unsubscribe(self._on_resolution) # to avoid infinite recursion
self._resva.value = newres
self._resva.subscribe(self._on_resolution)
    def sync_tint_on_emission(self, emission_wl, excitation_wl):
        """ Set the tint to the same colour as emission, if no dye has been selected. If a dye is
        selected, it's dependent on the dye information.
        :param emission_wl: ((tuple of) tuple of floats) emission wavelength
        :param excitation_wl: ((tuple of) tuple of floats) excitation wavelength
        """
        if self._dye_ewl is None: # if dye is used, keep the peak wavelength
            ewl_center = fluo.get_one_center_em(emission_wl, excitation_wl)
if self._dye_prev_ewl_center == ewl_center:
return
self._dye_prev_ewl_center = ewl_center
colour = wave2rgb(ewl_center)
logging.debug("Synchronising tint to %s", colour)
self.stream.tint.value = colour
# Control addition
def _add_selwidth_ctrl(self):
lbl_selection_width, sld_selection_width = self.stream_panel.add_specselwidth_ctrl()
se = SettingEntry(name="selectionwidth", va=self.stream.selectionWidth, stream=self.stream,
lbl_ctrl=lbl_selection_width, value_ctrl=sld_selection_width,
events=wx.EVT_SLIDER)
self.entries.append(se)
def _add_wl_ctrls(self):
self._sld_spec, txt_spec_center, txt_spec_bw = self.stream_panel.add_specbw_ctrls()
se = SettingEntry(name="spectrum", va=self.stream.spectrumBandwidth, stream=self.stream,
value_ctrl=self._sld_spec, events=wx.EVT_SLIDER)
self.entries.append(se)
def _get_center():
""" Return the low/high values for the bandwidth, from the requested center """
va = self.stream.spectrumBandwidth
ctrl = txt_spec_center
# ensure the low/high values are always within the allowed range
wl = va.value
wl_rng = (va.range[0][0], va.range[1][1])
width = wl[1] - wl[0]
ctr_rng = wl_rng[0] + width / 2, wl_rng[1] - width / 2
req_center = ctrl.GetValue()
new_center = min(max(ctr_rng[0], req_center), ctr_rng[1])
if req_center != new_center:
# VA might not change => update value ourselves
ctrl.SetValue(new_center)
return new_center - width / 2, new_center + width / 2
se = SettingEntry(name="spectrum_center", va=self.stream.spectrumBandwidth,
stream=self.stream, value_ctrl=txt_spec_center, events=wx.EVT_COMMAND_ENTER,
va_2_ctrl=lambda r: txt_spec_center.SetValue((r[0] + r[1]) / 2),
ctrl_2_va=_get_center)
self.entries.append(se)
def _get_bandwidth():
""" Return the low/high values for the bandwidth, from the requested bandwidth """
va = self.stream.spectrumBandwidth
ctrl = txt_spec_bw
# ensure the low/high values are always within the allowed range
wl = va.value
wl_rng = (va.range[0][0], va.range[1][1])
center = (wl[0] + wl[1]) / 2
max_width = max(center - wl_rng[0], wl_rng[1] - center) * 2
req_width = ctrl.GetValue()
new_width = max(0, min(req_width, max_width))
if req_width != new_width:
# VA might not change => update value ourselves
ctrl.SetValue(new_width)
return center - new_width / 2, center + new_width / 2
se = SettingEntry(name="spectrum_bw", va=self.stream.spectrumBandwidth,
stream=self.stream, value_ctrl=txt_spec_bw, events=wx.EVT_COMMAND_ENTER,
va_2_ctrl=lambda r: txt_spec_bw.SetValue(r[1] - r[0]),
ctrl_2_va=_get_bandwidth)
self.entries.append(se)
@wxlimit_invocation(0.2)
def _on_new_spec_data(self, gspec):
if not self or not self._sld_spec or gspec is None:
# if no new calibration, or empty data
return # already deleted
logging.debug("New spec data")
# Display the global spectrum in the visual range slider
if len(gspec) <= 1:
logging.warning("Strange spectrum of len %d", len(gspec))
return
# make it fit between 0 and 1
if len(gspec) >= 5:
# skip the 2 biggest peaks
s_values = numpy.sort(gspec)
mins, maxs = s_values[0], s_values[-3]
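            # s_values[-3] is the 3rd-largest value, so the two highest peaks
            # are excluded from the normalisation range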
else:
mins, maxs = gspec.min(), gspec.max()
        # for a spectrum, 0 makes little sense, so only normalise against the min
if mins < maxs:
coef = 1 / (maxs - mins)
else: # division by 0
coef = 1
gspec = (gspec - mins) * coef
self._sld_spec.SetContent(gspec.tolist())
def _add_dye_ctrl(self):
"""
Add controls to the stream panel needed for dye emission and excitation
Specifically used when both emission and excitation are present (because
together, more information can be extracted/presented).
"""
# Excitation
if not self.stream.excitation.readonly:
# TODO: mark dye incompatible with the hardware with a "disabled"
# colour in the list. (Need a special version of the combobox?)
self.stream_panel.set_header_choices(list(dye.DyeDatabase.keys()))
self.stream_panel.header_change_callback = self._on_new_dye_name
center_wl = fluo.get_one_center_ex(self.stream.excitation.value, self.stream.emission.value)
self._add_excitation_ctrl(wave2rgb(center_wl))
# Emission
center_wl = fluo.get_one_center_em(self.stream.emission.value, self.stream.excitation.value)
self._add_emission_ctrl(wave2rgb(center_wl))
def _onExcitationChannelChange(self, _):
"""
Event handler for the Excitation channel combobox selection.
Update the power slider range with the current power VA's
"""
if not self._power_entry:
return
# Set the slider with min value first (in order to change the range in case new value is > current slider max)
self._power_entry.value_ctrl.SetValue(self._power_entry.vigilattr.min)
# Then set the range followed by the actual value (this way no exception is thrown by SetRange)
self._power_entry.value_ctrl.SetRange(self._power_entry.vigilattr.min, self._power_entry.vigilattr.max)
self._power_entry.value_ctrl.SetValue(self._power_entry.vigilattr.value)
def _add_excitation_ctrl(self, center_wl_color=None):
"""
Add excitation ctrl
center_wl_color (None or 3 0<= int <= 255): RGB colour. If None, it
will be guessed.
"""
if center_wl_color is None:
center_wl = fluo.get_one_center(self.stream.excitation.value)
center_wl_color = wave2rgb(center_wl)
band = to_readable_band(self.stream.excitation.value)
readonly = self.stream.excitation.readonly or len(self.stream.excitation.choices) <= 1
r = self.stream_panel.add_dye_excitation_ctrl(band, readonly, center_wl_color)
lbl_ctrl, value_ctrl, self._lbl_exc_peak, self._btn_excitation = r
self.update_peak_label_fit(self._lbl_exc_peak, self._btn_excitation, None, band)
if not readonly:
choices = sorted(self.stream.excitation.choices, key=get_one_center)
for b in choices:
value_ctrl.Append(to_readable_band(b), b)
# Bind the excitation combobox selection event to update the power slider range
value_ctrl.Bind(wx.EVT_COMBOBOX, self._onExcitationChannelChange)
# Store power entry to be used in _onExcitationChannelChange event handler
self._power_entry = next((spe for spe in self.entries if spe.name == "power"), None)
def _excitation_2_va(value_ctrl=value_ctrl):
"""
Called when the text is changed (by the user).
returns a value to set for the VA
"""
excitation_wavelength = value_ctrl.GetClientData(value_ctrl.GetSelection())
self.sync_tint_on_emission(self.stream.emission.value, excitation_wavelength)
return excitation_wavelength
def _excitation_2_ctrl(value, value_ctrl=value_ctrl):
"""
Called to update the widgets (text + colour display) when the VA changes.
returns nothing
"""
# The control can be a label or a combo-box, but we are connected only
# when it's a combo-box
for i in range(value_ctrl.GetCount()):
if value_ctrl.GetClientData(i) == value:
value_ctrl.SetSelection(i)
break
else:
logging.error("No existing label found for value %s", value)
if self._dye_xwl is None and self._btn_excitation:
# no dye info? use hardware settings
colour = wave2rgb(fluo.get_one_center_ex(value, self.stream.emission.value))
self._btn_excitation.set_colour(colour)
else:
self.update_peak_label_fit(self._lbl_exc_peak,
self._btn_excitation,
self._dye_xwl, value)
# also update emission colour as it's dependent on excitation when multi-band
if self._dye_ewl is None and self._btn_emission:
colour = wave2rgb(fluo.get_one_center_em(self.stream.emission.value, value))
self._btn_emission.set_colour(colour)
se = SettingEntry(name="excitation", va=self.stream.excitation, stream=self.stream,
lbl_ctrl=lbl_ctrl, value_ctrl=value_ctrl, events=wx.EVT_COMBOBOX,
va_2_ctrl=_excitation_2_ctrl, ctrl_2_va=_excitation_2_va)
self.entries.append(se)
def _add_emission_ctrl(self, center_wl_color=None):
"""
Add emission ctrl
center_wl_color (None or 3 0<= int <= 255): RGB colour. If None, it
will be guessed.
"""
em = self.stream.emission.value
band = to_readable_band(em)
readonly = self.stream.emission.readonly or len(self.stream.emission.choices) <= 1
if center_wl_color is None:
if isinstance(em, basestring):
# Unknown colour or non-meaningful
center_wl_color = None
else:
center_wl = fluo.get_one_center(self.stream.emission.value)
center_wl_color = wave2rgb(center_wl)
r = self.stream_panel.add_dye_emission_ctrl(band, readonly, center_wl_color)
lbl_ctrl, value_ctrl, self._lbl_em_peak, self._btn_emission = r
if isinstance(em, basestring) and em != model.BAND_PASS_THROUGH:
if not readonly:
logging.error("Emission band is a string (%s), but not readonly", em)
return
self.update_peak_label_fit(self._lbl_em_peak, self._btn_emission, None, band)
if not readonly:
choices = sorted(self.stream.emission.choices, key=fluo.get_one_center)
for b in choices:
value_ctrl.Append(to_readable_band(b), b)
def _emission_2_va(value_ctrl=value_ctrl):
""" Called when the text is changed (by the user)
Also updates the tint as a side-effect.
"""
emission_wavelength = value_ctrl.GetClientData(value_ctrl.GetSelection())
self.sync_tint_on_emission(emission_wavelength, self.stream.excitation.value)
return emission_wavelength
def _emission_2_ctrl(value, value_ctrl=value_ctrl):
"""
Called to update the widgets (text + colour display) when the VA changes.
returns nothing
"""
for i in range(value_ctrl.GetCount()):
if value_ctrl.GetClientData(i) == value:
value_ctrl.SetSelection(i)
break
else:
logging.error("No existing label found for value %s", value)
if self._dye_ewl is None: # no dye info? use hardware settings
colour = wave2rgb(fluo.get_one_center_em(value, self.stream.excitation.value))
self._btn_emission.set_colour(colour)
else:
self.update_peak_label_fit(self._lbl_em_peak,
self._btn_emission,
self._dye_ewl, value)
# also update excitation colour as it's dependent on emission when multiband
if self._dye_xwl is None:
colour = wave2rgb(fluo.get_one_center_ex(self.stream.excitation.value, value))
self._btn_excitation.set_colour(colour)
se = SettingEntry(name="emission", va=self.stream.emission, stream=self.stream,
lbl_ctrl=lbl_ctrl, value_ctrl=value_ctrl, events=wx.EVT_COMBOBOX,
va_2_ctrl=_emission_2_ctrl, ctrl_2_va=_emission_2_va)
self.entries.append(se)
def _add_brightnesscontrast_ctrls(self):
""" Add controls for manipulating the (auto) contrast of the stream image data """
btn_autobc, lbl_bc_outliers, sld_outliers = self.stream_panel.add_autobc_ctrls()
# The following closures are used to link the state of the button to the availability of
# the slider
def _autobc_to_va():
enabled = btn_autobc.GetToggle()
sld_outliers.Enable(enabled)
return enabled
def _va_to_autobc(enabled):
btn_autobc.SetToggle(enabled)
sld_outliers.Enable(enabled)
# Store a setting entry for the auto brightness/contrast button
se = SettingEntry(name="autobc", va=self.stream.auto_bc, stream=self.stream,
value_ctrl=btn_autobc, events=wx.EVT_BUTTON,
va_2_ctrl=_va_to_autobc, ctrl_2_va=_autobc_to_va)
self.entries.append(se)
# Store a setting entry for the outliers slider
se = SettingEntry(name="outliers", va=self.stream.auto_bc_outliers, stream=self.stream,
value_ctrl=sld_outliers, lbl_ctrl=lbl_bc_outliers, events=wx.EVT_SLIDER)
self.entries.append(se)
def _add_outliers_ctrls(self):
""" Add the controls for manipulation the outliers """
sld_hist, txt_low, txt_high = self.stream_panel.add_outliers_ctrls()
self._prev_drange = (self.stream.intensityRange.range[0][0],
self.stream.intensityRange.range[1][1])
# The standard va_2_ctrl could almost work, but in some cases, if the
# range and value are completely changed, they need to be set in the
# right order. The slider expects to have first the range updated, then
# the new value. So always try to do the fast version, and if it failed,
# use a slower version which uses the latest known values of everything.
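# Illustrative example (hypothetical values): switching a detector from
# 8-bit to 16-bit data changes the drange from (0, 255) to (0, 65535);
# the slider range must be widened first, otherwise setting a new value
# such as (1000, 60000) would be rejected as out of range.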
def _on_irange(val):
intensity_rng_va = self.stream.intensityRange
drange = (intensity_rng_va.range[0][0], intensity_rng_va.range[1][1])
if drange != self._prev_drange:
self._prev_drange = drange
sld_hist.SetRange(drange[0], drange[1])
# Setting the values should not be necessary as the value should have
# already been updated via the VA update
txt_low.SetValueRange(drange[0], drange[1])
txt_high.SetValueRange(drange[0], drange[1])
if not all(drange[0] <= v <= drange[1] for v in val):
# The received value does not fit the current range, which is a
# sign that it's too old. Getting the latest one should fix it.
cval = intensity_rng_va.value
logging.debug("Updating latest irange %s to %s", val, cval)
val = cval
sld_hist.SetValue(val)
# TODO: also do the txt_low & txt_high .SetValue?
se = SettingEntry(name="intensity_range", va=self.stream.intensityRange, stream=self.stream,
value_ctrl=sld_hist, events=wx.EVT_SLIDER, va_2_ctrl=_on_irange)
self.entries.append(se)
if hasattr(self.stream, "auto_bc"):
# The outlier controls need to be disabled when auto brightness/contrast is active
def _enable_outliers(autobc_enabled):
""" En/disable the controls when the auto brightness and contrast are toggled """
sld_hist.Enable(not autobc_enabled)
txt_low.Enable(not autobc_enabled)
txt_high.Enable(not autobc_enabled)
# ctrl_2_va gets passed an identity function, to prevent the VA connector
# from looking for a linked value control (which we don't really need in
# this case; this setting entry is only here so that a reference to
# `_enable_outliers` is preserved).
se = SettingEntry("_auto_bc_switch", va=self.stream.auto_bc, stream=self.stream,
va_2_ctrl=_enable_outliers, ctrl_2_va=lambda x: x)
self.entries.append(se)
def _get_lowi():
intensity_rng_va = self.stream.intensityRange
req_lv = txt_low.GetValue()
hiv = intensity_rng_va.value[1]
# clamp low range to max high range
lov = max(intensity_rng_va.range[0][0], min(req_lv, hiv, intensity_rng_va.range[1][0]))
if lov != req_lv:
txt_low.SetValue(lov)
return lov, hiv
se = SettingEntry(name="low_intensity", va=self.stream.intensityRange, stream=self.stream,
value_ctrl=txt_low, events=wx.EVT_COMMAND_ENTER,
va_2_ctrl=lambda r: txt_low.SetValue(r[0]), ctrl_2_va=_get_lowi)
self.entries.append(se)
def _get_highi():
intensity_rng_va = self.stream.intensityRange
lov = intensity_rng_va.value[0]
req_hv = txt_high.GetValue()
# clamp high range to at least low range
hiv = max(lov, intensity_rng_va.range[0][1], min(req_hv, intensity_rng_va.range[1][1]))
if hiv != req_hv:
txt_high.SetValue(hiv)
return lov, hiv
se = SettingEntry(name="high_intensity", va=self.stream.intensityRange, stream=self.stream,
value_ctrl=txt_high, events=wx.EVT_COMMAND_ENTER,
va_2_ctrl=lambda r: txt_high.SetValue(r[1]), ctrl_2_va=_get_highi)
self.entries.append(se)
def _on_histogram(hist):
""" Display the new histogram data in the histogram slider
hist (nd.array of N values): the content of the histogram, ordered
by bins.
"""
# TODO: don't update when folded: it's useless => unsubscribe
if len(hist):
# a logarithmic histogram is easier to read
lhist = numpy.log1p(hist)
lhistmx = lhist.max()
if lhistmx == 0: # avoid dividing by 0
lhistmx = 1
norm_hist = lhist / lhistmx
# ndarrays work too, but slower to display
norm_hist = norm_hist.tolist()
else:
norm_hist = []
sld_hist.SetContent(norm_hist)
# Again, we use an entry to keep a reference of the closure around
se = SettingEntry("_histogram", va=self.stream.histogram, stream=self.stream,
va_2_ctrl=_on_histogram, ctrl_2_va=lambda x: x)
self.entries.append(se)
def _add_repetition_ctrl(self):
"""
Add the repetition/pixelSize/fuzzing settings for the RepetitionStreams
"""
va_config = OrderedDict((
("repetition", {
"control_type": CONTROL_COMBO,
"choices": {(1, 1)}, # Actually, it's immediately replaced by _onStreamRep()
"accuracy": None, # never simplify the numbers
}),
("pixelSize", {
"control_type": CONTROL_FLT,
}),
("fuzzing", {
"tooltip": u"Scans each pixel over their complete area, instead of only scanning the center the pixel area.",
}),
))
roa_ctrls = []
for vaname, conf in va_config.items():
try:
va = getattr(self.stream, vaname)
except AttributeError:
logging.debug("Skipping non existent VA %s on %s", vaname, self.stream)
continue
ent = self.add_setting_entry(vaname, va, hw_comp=None, conf=conf)
if vaname == "repetition":
self._rep_ctrl = ent.value_ctrl
# Update the combo box choices based on the current repetition value
# (an alternative would be to override our own va_2_ctrl to the
# SettingEntry, but currently create_setting_entry() doesn't
# allow to change it)
self.stream.repetition.subscribe(self._onStreamRep, init=True)
if vaname in ("repetition", "pixelSize"):
roa_ctrls.append(ent.value_ctrl)
if hasattr(self.tab_data_model, "roa") and self._sb_ctrl:
self._connectRepOverlay(roa_ctrls)
# END Control addition
@staticmethod
def update_peak_label_fit(lbl_ctrl, col_ctrl, wl, band):
""" Changes the colour & tooltip of the peak label based on how well it fits to the given
band setting.
:param lbl_ctrl: (wx.StaticText) control to update the foreground colour
:param col_ctrl: (wx.ButtonColour) just to update the tooltip
:param wl: (None or float) the wavelength of peak of the dye or None if no dye
:param band: ((list of) tuple of 2 or 5 floats) the band of the hw setting
"""
if None in (lbl_ctrl, col_ctrl):
return
if wl is None:
# No dye known => no peak information
lbl_ctrl.LabelText = u""
lbl_ctrl.SetToolTip(None)
col_ctrl.SetToolTip(u"Centre wavelength colour")
else:
wl_nm = int(round(wl * 1e9))
lbl_ctrl.LabelText = u"Peak at %d nm" % wl_nm
col_ctrl.SetToolTip(u"Peak wavelength colour")
fit = fluo.estimate_fit_to_dye(wl, band)
# Update colour
colour = {fluo.FIT_GOOD: FG_COLOUR_DIS,
fluo.FIT_BAD: FG_COLOUR_WARNING,
fluo.FIT_IMPOSSIBLE: FG_COLOUR_ERROR}[fit]
lbl_ctrl.SetForegroundColour(colour)
# Update tooltip string
tooltip = {
fluo.FIT_GOOD: u"The peak is inside the band %d→%d nm",
fluo.FIT_BAD: u"Some light might pass through the band %d→%d nm",
fluo.FIT_IMPOSSIBLE: u"The peak is too far from the band %d→%d nm"
}[fit]
if isinstance(band[0], collections.Iterable): # multi-band
band = fluo.find_best_band_for_dye(wl, band)
low, high = [int(round(b * 1e9)) for b in (band[0], band[-1])]
lbl_ctrl.SetToolTip(tooltip % (low, high))
# Repetition visualisation on focus/hover methods
# The global rule (in order):
# * if mouse is hovering an entry (repetition or pixel size) => display
# repetition for this stream
# * if an entry of stream has focus => display repetition for this stream
# * don't display repetition
def _connectRepOverlay(self, controls):
"""
Connects the stream VAs and controls to display the repetition overlay
when needed.
Warning: must be called from the main GUI thread.
stream (RepetitionStream)
controls (list of wx.Controls): controls that are used to change the
repetition/pixel size info
"""
self._roa_ctrls = set() # all wx Controls which should activate visualisation
self._hover_rep = False # True if current mouse is hovering a control
# repetition VA not needed: if it changes, either roi or pxs also change
self.stream.roi.subscribe(self._onRepStreamVA)
self.stream.pixelSize.subscribe(self._onRepStreamVA)
for c in controls:
self._roa_ctrls.add(c)
c.Bind(wx.EVT_SET_FOCUS, self._onRepFocus)
c.Bind(wx.EVT_KILL_FOCUS, self._onRepFocus)
c.Bind(wx.EVT_ENTER_WINDOW, self._onRepHover)
c.Bind(wx.EVT_LEAVE_WINDOW, self._onRepHover)
# To handle the combobox, which sends leave window events when the
# mouse goes into the text ctrl child of the combobox.
if hasattr(c, "TextCtrl"):
tc = c.TextCtrl
self._roa_ctrls.add(tc)
tc.Bind(wx.EVT_ENTER_WINDOW, self._onRepHover)
def _disconnectRepOverlay(self):
if hasattr(self.stream, "roi"):
self.stream.roi.unsubscribe(self._onRepStreamVA)
if hasattr(self.stream, "pixelSize"):
self.stream.pixelSize.unsubscribe(self._onRepStreamVA)
@wxlimit_invocation(0.1)
def _updateRepOverlay(self):
"""
Ensure the repetition overlay is displaying the right thing
"""
# Show iff: the mouse is hovering one of the roa_ctrls, or a roa_ctrl has the focus
focused = wx.Window.FindFocus()
show_rep = self._hover_rep or (focused in self._roa_ctrls)
if show_rep:
rep = self.stream.repetition.value
if isinstance(self.stream, acqstream.ARStream):
style = RepetitionSelectOverlay.FILL_POINT
else:
style = RepetitionSelectOverlay.FILL_GRID
self._sb_ctrl.show_roa_repetition(self.stream, rep, style)
else:
self._sb_ctrl.show_roa_repetition(self.stream, None)
def _onRepStreamVA(self, _):
"""
Called when one of the repetition VAs of a RepetitionStream is modified
stream (RepetitionStream)
val (value): new VA value, unused
"""
self._updateRepOverlay()
def _onRepFocus(self, evt):
"""
Called when any control related to the repetition get/loose focus
"""
self._updateRepOverlay()
evt.Skip()
def _onRepHover(self, evt):
if evt.Entering():
self._hover_rep = True
elif evt.Leaving():
self._hover_rep = False
else:
logging.warning("neither leaving nor entering")
self._updateRepOverlay()
evt.Skip()
# Repetition combobox content updater
@call_in_wx_main
def _onStreamRep(self, rep):
"""
Called when the repetition VA of a RepetitionStream is modified.
Recalculate the repetition presets according to the repetition ratio
"""
ratio = rep[1] / rep[0]
# Create the entries:
choices = [(1, 1), rep] # 1 x 1 should always be there
# Add a couple values below/above the current repetition
for m in (1 / 4, 1 / 2, 2, 4, 10):
x = int(round(rep[0] * m))
y = int(round(x * ratio))
choices.append((x, y))
# remove non-possible ones
rng = self.stream.repetition.range
def is_compatible(c):
# TODO: it's actually further restricted by the current size of
# the ROI (and the minimum size of the pixelSize), so some of the
# big repetitions might actually not be valid. It's not a big
# problem as the VA setter will silently limit the repetition
return (rng[0][0] <= c[0] <= rng[1][0] and
rng[0][1] <= c[1] <= rng[1][1])
choices = set(choice for choice in choices if is_compatible(choice))
choices = sorted(choices)
# replace the old list with this new version
self._rep_ctrl.Clear()
for choice in choices:
self._rep_ctrl.Append(u"%s x %s px" % choice, choice)
# Make sure the current value is selected
self._rep_ctrl.SetSelection(choices.index(rep))
def _add_fastem_ctrls(self):
self.stream_panel.add_divider()
_, btn_autofocus = self.stream_panel.add_run_btn("Autofocus")
_, btn_autobc = self.stream_panel.add_run_btn("Auto-brightness/contrast")
_, btn_autostigmation = self.stream_panel.add_run_btn("Autostigmation")
btn_autofocus.Bind(wx.EVT_BUTTON, self._on_btn_autofocus)
btn_autobc.Bind(wx.EVT_BUTTON, self._on_btn_autobc)
btn_autostigmation.Bind(wx.EVT_BUTTON, self._on_btn_autostigmation)
@call_in_wx_main
def _on_btn_autofocus(self, _):
self.stream_panel.Enable(False)
self.pause()
self.pauseStream()
f = self.stream.focuser.applyAutofocus(self.stream.detector)
f.add_done_callback(self._on_autofunction_done)
@call_in_wx_main
def _on_btn_autobc(self, _):
self.stream_panel.Enable(False)
self.pause()
self.pauseStream()
f = self.stream.emitter.applyAutoContrastBrightness(self.stream.detector)
f.add_done_callback(self._on_autofunction_done)
@call_in_wx_main
def _on_btn_autostigmation(self, _):
self.stream_panel.Enable(False)
self.pause()
self.pauseStream()
f = self.stream.emitter.applyAutoStigmator(self.stream.detector)
f.add_done_callback(self._on_autofunction_done)
@call_in_wx_main
def _on_autofunction_done(self, f):
self.stream_panel.Enable(True)
self.resume()
# Don't automatically resume stream, autofunctions can take a long time.
# The user might not be at the system after the functions complete, so the stream
# would play idly.
class StreamBarController(object):
"""
Manages the streams and their corresponding stream panels in the stream bar.
In particular it takes care of:
* Defining the menu entries for adding streams
* Play/pause the streams, via a "scheduler"
* Play/pause the spot stream in spot mode
* Connects the ROA to the .roi of RepetitionStream
* Shows the repetition overlay when the repetition setting is focused
"""
def __init__(self, tab_data, stream_bar, static=False, locked=False, ignore_view=False,
view_ctrl=None):
"""
:param tab_data: (MicroscopyGUIData) the representation of the microscope Model
:param stream_bar: (StreamBar) an empty stream bar
:param static: (bool) Treat streams as static (can't play/pause)
:param locked: (bool) Don't allow to add/remove/hide/show streams
:param ignore_view: (bool) don't change the visible panels on focussed
view change. If False and not locked, it will show the panels
compatible with the focussed view. If False and locked, it will show
the panels which are seen in the focussed view.
:param view_ctrl: (ViewPortController or None) Only required to show
repetition and on the SPARC ensure the right view is shown.
"""
self._tab_data_model = tab_data
self._main_data_model = tab_data.main
self._stream_bar = stream_bar
# Never allow SEM and CLi Stream to play with spot mode (because they are
# spatial, so it doesn't make sense to see just one point), and force
# AR, Spectrum, and Monochromator streams with spot mode (because the
# first two otherwise could be playing with beam "blanked", which shows
# weird signal, and the last one would be very slow to update).
# TODO: it could make sense to allow AR and Spectrum stream to play
# while doing a normal scan, but the scheduler would need to allow playing
# on spatial stream simultaneously (just the SE?) and force to play it
# when not in spot mode. (for now, we keep it simple)
self._spot_incompatible = (acqstream.SEMStream, acqstream.CLStream, acqstream.OpticalStream)
self._spot_required = (acqstream.ARStream, acqstream.SpectrumStream,
acqstream.MonochromatorSettingsStream,
acqstream.ScannedTCSettingsStream,
acqstream.ScannedTemporalSettingsStream,
acqstream.TemporalSpectrumSettingsStream,
)
tab_data.tool.subscribe(self.on_tool_change)
self._view_controller = view_ctrl
self.stream_controllers = []
self._roi_listeners = {} # (Repetition)Stream -> callable
self._show_reps = {} # Stream -> (rep, style)
# This attribute indicates whether live data is processed by the streams
# in the controller, or that they just display static data.
self.static_mode = static
# Disable all controls
self.locked_mode = locked
self.menu_actions = collections.OrderedDict() # title => callback
self._scheduler_subscriptions = {} # stream -> callable
self._sched_policy = SCHED_LAST_ONE # works well in most cases
self._createAddStreamActions()
# Don't hide or show stream panel when the focused view changes
self.ignore_view = ignore_view
self._prev_view = None
self._tab_data_model.focussedView.subscribe(self._onView, init=True)
# FIXME: don't use pubsub events, but either wxEVT or VAs. For now every
# stream controller is going to try to remove the stream.
pub.subscribe(self.removeStream, 'stream.remove')
# Stream preparation future
self.preparation_future = model.InstantaneousFuture()
# If any stream already present: listen to them in the scheduler (but
# don't display)
for s in tab_data.streams.value:
logging.debug("Adding stream present at init to scheduler: %s", s)
self._scheduleStream(s)
# TODO: use the same behaviour on the SPARC
self._spot_stream = None
if hasattr(tab_data, "spotStream") and tab_data.spotStream:
self._spot_stream = tab_data.spotStream
self._scheduleStream(self._spot_stream)
def pause(self):
""" Pause (=freeze) SettingEntry related control updates """
for stream_controller in self.stream_controllers:
stream_controller.pause()
def resume(self):
""" Resume SettingEntry related control updates """
for stream_controller in self.stream_controllers:
stream_controller.resume()
def enable(self, enabled):
""" Enable or disable all the streambar controls """
for stream_controller in self.stream_controllers:
stream_controller.enable(enabled)
self._stream_bar.btn_add_stream.Enable(enabled)
# unused in production code (but exercised by the test cases)
def get_actions(self):
return self.menu_actions
# TODO need to have actions enabled/disabled depending on the context:
# * if microscope is off/pause => disabled
# * if focused view is not about this type of stream => disabled
# * if there can be only one stream of this type, and it's already present
# => disabled
def add_action(self, title, callback, check_enabled=None):
""" Add an action to the stream menu
It's added at the end of the list. If an action with the same title exists, it is replaced.
:param title: (string) Text displayed in the menu
:param callback: (callable) function to call when the action is selected
"""
if self._stream_bar.btn_add_stream is None:
logging.error("No add button present!")
else:
logging.debug("Adding %s action to stream panel", title)
self.menu_actions[title] = callback
self._stream_bar.btn_add_stream.add_choice(title, callback, check_enabled)
def add_overview_action(self, callback):
""" Add an overview action to the button
:param callback: (callable) function to call when the action is selected
"""
if self._stream_bar.btn_add_stream is None:
logging.error("No add button present!")
else:
logging.debug("Enabling add overview")
self._stream_bar.hide_add_button()
self._stream_bar.show_overview_button()
self._stream_bar.btn_add_overview.Bind(wx.EVT_BUTTON, callback)
def remove_action(self, title):
"""
Remove the given action, if it exists. Otherwise does nothing
title (string): name of the action to remove
"""
if title in self.menu_actions:
logging.debug("Removing %s action from stream panel", title)
del self.menu_actions[title]
self._stream_bar.btn_add_stream.remove_choice(title)
def to_static_mode(self):
self.static_mode = True
def to_locked_mode(self):
self.locked_mode = True
def setSchedPolicy(self, policy):
"""
Change the stream scheduling policy
policy (SCHED_*): the new policy
"""
assert policy in (SCHED_LAST_ONE, SCHED_ALL)
self._sched_policy = policy
def _createAddStreamActions(self):
"""
Create the compatible "add stream" actions according to the current
microscope.
To be executed only once, at initialisation.
"""
pass
def _userAddFluo(self, **kwargs):
""" Called when the user request adding a Fluo stream
Same as addFluo, but also changes the focus to the name text field
"""
se = self.addFluo(**kwargs)
se.stream_panel.set_focus_on_label()
def _ensure_power_non_null(self, stream):
"""
Ensure the power VA of a stream is not 0. The goal is to make sure
that when the stream will start playing, directly some data will be
obtained (to avoid confusing the user). In practice, if it is 0, a small
value (10%) will be set.
stream (Stream): the stream with a power VA
"""
if stream.power.value > 0.:
return
# Automatically picks some power if it was at 0 W (due to the stream
# defaulting to the current hardware settings), so that the user is not
# confused when playing the stream and nothing happens.
if hasattr(stream.power, "range"):
stream.power.value = stream.power.range[1] * 0.1
elif hasattr(stream.power, "choices"):
stream.power.value = sorted(stream.power.choices)[1]
else:
logging.info("Stream power has no info about min/max")
def addFluo(self, **kwargs):
"""
Creates a new fluorescence stream and a stream panel in the stream bar
returns (StreamController): the panel created
"""
# Find a name not already taken
names = [s.name.value for s in self._tab_data_model.streams.value]
for i in range(1, 1000):
name = "Filtered colour %d" % i
if name not in names:
break
else:
logging.error("Failed to find a new unique name for stream")
name = "Filtered colour"
s = acqstream.FluoStream(
name,
self._main_data_model.ccd,
self._main_data_model.ccd.data,
self._main_data_model.light,
self._main_data_model.light_filter,
focuser=self._main_data_model.focus,
opm=self._main_data_model.opm,
detvas={"exposureTime"},
)
self._ensure_power_non_null(s)
# TODO: automatically pick a good set of excitation/emission which is
# not yet used by any FluoStream (or the values from the last stream
# deleted?) Or is it better to just use the values fitting the current
# hardware settings as it is now?
return self._add_stream(s, **kwargs)
def addBrightfield(self, **kwargs):
"""
Creates a new brightfield stream and panel in the stream bar
returns (StreamController): the stream panel created
"""
s = acqstream.BrightfieldStream(
"Bright-field",
self._main_data_model.ccd,
self._main_data_model.ccd.data,
self._main_data_model.brightlight,
focuser=self._main_data_model.focus,
opm=self._main_data_model.opm,
detvas={"exposureTime"},
)
self._ensure_power_non_null(s)
return self._add_stream(s, **kwargs)
def addDarkfield(self, **kwargs):
"""
Creates a new darkfield stream and panel in the stream bar
returns (StreamController): the stream panel created
"""
# Note: it's also displayed as 'brightfield' stream
s = acqstream.BrightfieldStream(
"Dark-field",
self._main_data_model.ccd,
self._main_data_model.ccd.data,
self._main_data_model.backlight,
focuser=self._main_data_model.focus,
opm=self._main_data_model.opm,
detvas={"exposureTime"},
)
self._ensure_power_non_null(s)
return self._add_stream(s, **kwargs)
def addConfocal(self, detector, **kwargs):
"""
Creates a new confocal stream and panel in the stream bar
detector (Detector): the photo-detector to use
returns (StreamController): the stream panel created
"""
# As there is only one stream per detector, we can put its VAs directly
# instead of them being a local copy. This also happens to work around
# an issue with detecting IntContinuous in local VAs.
# set_stream contains the shared settings for the laser_mirror and light
s = acqstream.ScannedFluoStream(
"Confocal %s" % (detector.name,),
detector,
detector.data,
self._main_data_model.light,
self._main_data_model.laser_mirror,
self._main_data_model.light_filter,
focuser=self._main_data_model.focus,
opm=self._main_data_model.opm,
hwdetvas=get_local_vas(detector, self._main_data_model.hw_settings_config),
setting_stream=self._tab_data_model.confocal_set_stream,
)
return self._add_stream(s, **kwargs)
def addSEMSED(self, **kwargs):
""" Creates a new SED stream and panel in the stream bar
return (StreamController) The controller created for the SED stream
"""
return self._add_sem_stream("Secondary electrons",
self._main_data_model.sed, **kwargs)
def addSEMBSD(self, **kwargs):
"""
Creates a new backscattered electron stream and panel in the stream bar
returns (StreamPanel): the panel created
"""
return self._add_sem_stream("Backscattered electrons",
self._main_data_model.bsd, **kwargs)
def addEBIC(self, **kwargs):
"""
Creates a new EBIC stream and panel in the stream bar
returns (StreamPanel): the panel created
"""
return self._add_sem_stream("EBIC", self._main_data_model.ebic, **kwargs)
def addScannedTCSettings(self, **kwargs):
"""
Creates a new ScannedTCSettingStream and panel in the stream bar.
returns (StreamPanel): the panel created
"""
s = acqstream.ScannedTCSettingsStream(
"FLIM",
self._main_data_model.tc_detector,
self._main_data_model.light,
self._main_data_model.laser_mirror,
self._main_data_model.time_correlator,
scanner_extra=self._main_data_model.tc_scanner,
tc_detector_live=self._main_data_model.tc_detector_live,
opm=self._main_data_model.opm,
emtvas=get_local_vas(self._main_data_model.light, self._main_data_model.hw_settings_config),
)
stream_cont = self._add_stream(s, add_to_view=True, **kwargs)
# TODO: should we really not show this visible button? Left-over from SPARC.
# Currently, as it can only be seen on its own view, it actually makes sense.
stream_cont.stream_panel.show_visible_btn(False)
return stream_cont
def _add_sem_stream(self, name, detector, **kwargs):
if self._main_data_model.role == "delphi":
# For the Delphi, the SEM stream needs to be more "clever" because
# it needs to run a simple spot alignment every time the stage has
# moved before starting to acquire.
s = acqstream.AlignedSEMStream(
name,
detector,
detector.data,
self._main_data_model.ebeam,
self._main_data_model.ccd,
self._main_data_model.stage,
self._main_data_model.focus,
focuser=self._main_data_model.ebeam_focus,
opm=self._main_data_model.opm,
shiftebeam=acqstream.MTD_EBEAM_SHIFT
)
else:
# Hack: If the blanker doesn't support "automatic" mode (None),
# we have a trick to control the blanker in the stream. Ideally,
# this would be done by the optical-path manager, or by the e-beam
# driver (by always providing a None option).
# We only do this on the SECOM, because on the SPARC it's less of an
# issue, and we would need to change a lot more streams.
# TODO: remove once the CompositedScanner supports automatic blanker.
if (self._main_data_model.role == "secom" and
model.hasVA(self._main_data_model.ebeam, "blanker") and
None not in self._main_data_model.ebeam.blanker.choices
):
blanker = self._main_data_model.ebeam.blanker
else:
blanker = None
s = acqstream.SEMStream(
name,
detector,
detector.data,
self._main_data_model.ebeam,
focuser=self._main_data_model.ebeam_focus,
opm=self._main_data_model.opm,
blanker=blanker
)
# If the detector already handles brightness and contrast, don't do it by default
# TODO: check if it has .applyAutoContrast() instead (once it's possible)
if (s.intensityRange.range == ((0, 0), (255, 255)) and
model.hasVA(detector, "contrast") and
model.hasVA(detector, "brightness")):
s.auto_bc.value = False
s.intensityRange.value = (0, 255)
return self._add_stream(s, **kwargs)
def addStatic(self, name, image, cls=acqstream.StaticStream, **kwargs):
""" Creates a new static stream and stream controller
:param name: (string)
:param image: (DataArray)
:param cls: (class of Stream)
:param returns: (StreamController): the controller created
"""
s = cls(name, image)
return self.addStream(s, **kwargs)
def addStream(self, stream, **kwargs):
""" Create a stream entry for the given existing stream
Must be run in the main GUI thread.
:return StreamPanel: the panel created for the stream
"""
return self._add_stream(stream, **kwargs)
def _add_stream(self, stream, add_to_view=False, visible=True, play=None):
""" Add the given stream to the tab data model and appropriate views
Args:
stream (Stream): the new stream to add
Kwargs:
add_to_view (boolean or View): if True, add the stream to all the compatible views,
if False add to the current view, otherwise, add to the given view.
visible (boolean): If True, create a stream entry, otherwise adds the stream but do not
create any entry.
play (None or boolean): If True, immediately start it, if False, let it stopped, and if
None, only play if already a stream is playing.
Returns:
(StreamController or Stream): the stream controller or stream (if visible is False) that
was created
"""
if stream not in self._tab_data_model.streams.value:
# Insert it as first, so it's considered the latest stream used
self._tab_data_model.streams.value.insert(0, stream)
fview = self._tab_data_model.focussedView.value
if add_to_view is True:
for v in self._tab_data_model.visible_views.value:
if hasattr(v, "stream_classes") and isinstance(stream, v.stream_classes):
v.addStream(stream)
else:
if add_to_view is False:
v = fview
else:
v = add_to_view
if hasattr(v, "stream_classes") and not isinstance(stream, v.stream_classes):
warn = "Adding %s stream incompatible with the view %s"
logging.warning(warn, stream.__class__.__name__, v.name.value)
v.addStream(stream)
# TODO: create a StreamScheduler call it like self._scheduler.addStream(stream)
# ... or simplify to only support a stream at a time
self._scheduleStream(stream)
# start the stream right now (if requested)
if play is None:
if not visible:
play = False
else:
play = any(s.should_update.value for s in self._tab_data_model.streams.value)
stream.should_update.value = play
if visible:
linked_view = None
if self.ignore_view: # Always show the stream panel
show_panel = True
if not isinstance(add_to_view, bool):
linked_view = v
elif self.locked_mode: # (and don't ignore_view)
# Show the stream panel iff the view is showing the stream
show_panel = stream in fview.getStreams()
else: # (standard = not locked and don't ignore_view)
# Show the stream panel iff the view could display the stream
show_panel = isinstance(stream, fview.stream_classes)
stream_cont = self._add_stream_cont(stream,
show_panel,
locked=self.locked_mode,
static=self.static_mode,
view=linked_view,
)
return stream_cont
else:
return stream
def _add_stream_cont(self, stream, show_panel=True, locked=False, static=False,
view=None):
""" Create and add a stream controller for the given stream
:return: (StreamController)
"""
stream_cont = StreamController(self._stream_bar, stream, self._tab_data_model,
show_panel, view, sb_ctrl=self)
if locked:
stream_cont.to_locked_mode()
elif static:
stream_cont.to_static_mode()
self.stream_controllers.append(stream_cont)
# Only connect the .roi of RepetitionStreams (ie, has .repetition)
if hasattr(stream, "repetition") and hasattr(self._tab_data_model, "roa"):
self._connectROI(stream)
return stream_cont
# === VA handlers
def _onView(self, view):
""" Handle the changing of the focused view """
if not view or self.ignore_view:
return
if self.locked_mode:
# hide/show the stream panels of the streams visible in the view
allowed_streams = view.getStreams()
for e in self._stream_bar.stream_panels:
e.Show(e.stream in allowed_streams)
else:
# hide/show the stream panels which are compatible with the view
allowed_classes = view.stream_classes
for e in self._stream_bar.stream_panels:
e.Show(isinstance(e.stream, allowed_classes))
self._stream_bar.fit_streams()
# update the "visible" icon of each stream panel to match the list
# of streams in the view
if self._prev_view is not None:
self._prev_view.stream_tree.flat.unsubscribe(self._on_visible_streams)
view.stream_tree.flat.subscribe(self._on_visible_streams, init=True)
self._prev_view = view
def _on_visible_streams(self, flat):
# Convert the DataProjections into Stream
visible_streams = [s if isinstance(s, acqstream.Stream) else s.stream for s in flat]
for e in self._stream_bar.stream_panels:
e.set_visible(e.stream in visible_streams)
def _onStreamUpdate(self, stream, updated):
"""
Called when a stream "updated" state changes
"""
# This is a stream scheduler:
# * "should_update" streams are the streams to be scheduled
# * a stream becomes "active" when it's currently acquiring
# * when a stream is just set to be "should_update" (by the user) it
# should be scheduled as soon as possible
# Note we ensure that .streams is sorted with the new playing stream as
# the first one in the list. This means that .streams is LRU sorted,
# which can be used for various stream information.
# TODO: that works nicely for live tabs, but in analysis tab, this
# never happens so the latest stream is always the same one.
# => need more ways to change current stream (at least pick one from the
# current view?)
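# e.g. (illustrative) if streams == [A, B, C] and C starts playing, the
# code at the end of this method reorders the list to [C, A, B], so
# streams[0] is always the most recently played stream.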
# Get the spot Stream, if the model has one, otherwise None
spots = getattr(self._tab_data_model, "spotStream", None)
# Don't mess too much with the spot stream => just copy "should_update"
if stream is spots:
stream.is_active.value = updated
return
if self._sched_policy == SCHED_LAST_ONE:
# Only last stream with should_update is active
if not updated:
self._prepareAndActivate(stream, False)
# the other streams might or might not be updated, we don't care
else:
# FIXME: hack to not stop the spot stream => different scheduling policy?
# Make sure that other streams are not updated (and it also
# provides feedback to the user about which stream is active)
for s, cb in self._scheduler_subscriptions.items():
if (s not in (stream, spots) and
(s.should_update.value or s.is_active.value)):
try:
self._prepareAndActivate(s, False)
s.should_update.unsubscribe(cb) # don't inform us of that change
s.should_update.value = False
s.should_update.subscribe(cb)
except Exception:
logging.exception("Failed to stop stream %s", stream.name.value)
# prepare and activate this stream
# It's important it's last, to ensure hardware settings don't
# mess up with each other.
self._prepareAndActivate(stream, True)
elif self._sched_policy == SCHED_ALL:
# All streams with should_update are active
# TODO: there is probably no way it works as-is (and it's never used anyway)
self._prepareAndActivate(stream, updated)
else:
raise NotImplementedError("Unknown scheduling policy %s" % self._sched_policy)
if updated:
# Activate or deactivate spot mode based on what the stream needs
# Note: changing tool is fine, because it will only _pause_ the
# other streams, and we will not come here again.
if isinstance(stream, self._spot_incompatible) and spots:
if self._tab_data_model.tool.value == TOOL_SPOT:
logging.info("Stopping spot mode because %s starts", stream)
self._tab_data_model.tool.value = TOOL_NONE
spots.is_active.value = False
elif isinstance(stream, self._spot_required) and spots:
logging.info("Starting spot mode because %s starts", stream)
was_active = spots.is_active.value
self._tab_data_model.tool.value = TOOL_SPOT
# Hack: to be sure the settings of the spot streams are correct
# (because the concurrent stream might have changed them, cf
# Monochromator), we stop/start it each time a stream plays
if was_active:
# FIXME: when switching from one Monochromator stream to
# another one, it seems to mess up the resolution on the
# first time => needs to be done after the old stream is paused
# and before the new one plays
logging.debug("Resetting spot mode")
spots.is_active.value = False
spots.is_active.value = True
# put it back to the beginning of the list to indicate it's the
# latest stream used
l = self._tab_data_model.streams.value
try:
i = l.index(stream)
except ValueError:
logging.info("Stream %s is not in the stream list", stream.name)
return
if i == 0:
return # fast path
l = [stream] + l[:i] + l[i + 1:] # new list reordered
self._tab_data_model.streams.value = l
else:
# The stream is now paused. If it used the spot stream, pause that one too.
if isinstance(stream, self._spot_required) and spots:
self._tab_data_model.tool.value = TOOL_NONE
spots.is_active.value = False
def on_tool_change(self, tool):
""" Pause the SE and CLI streams when the Spot mode tool is activated """
if hasattr(self._tab_data_model, 'spotStream'):
spots = self._tab_data_model.spotStream
if tool == TOOL_SPOT:
# Make sure the streams non compatible are not playing
paused_st = self.pauseStreams(self._spot_incompatible)
spots.should_update.value = True
else:
# Make sure that the streams requiring the spot are not playing
paused_st = self.pauseStreams(self._spot_required)
spots.should_update.value = False
def _prepareAndActivate(self, stream, updated):
"""
Prepare and activate the given stream.
stream (Stream): the stream to prepare and activate.
"""
# Cancel the previous preparation in case it's still trying
self.preparation_future.cancel()
if updated:
self._main_data_model.is_preparing.value = True
self.preparation_future = stream.prepare()
cb_on_prepare = functools.partial(self._canActivate, stream)
self.preparation_future.add_done_callback(cb_on_prepare)
else:
stream.is_active.value = False
@call_in_wx_main
def _canActivate(self, stream, future):
self._main_data_model.is_preparing.value = False
if future.cancelled():
logging.debug("Not activating %s as its preparation was cancelled", stream.name.value)
elif not stream.should_update.value:
logging.debug("Not activating %s as it is now paused", stream.name.value)
else:
try:
future.result()
except Exception:
logging.exception("Preparation of %s failed, but will activate the stream anyway",
stream.name.value)
else:
logging.debug("Preparation of %s completed, will activate it", stream.name.value)
stream.is_active.value = True
# Mostly to avoid keeping ref to the stream (hold in the callback)
self.preparation_future = model.InstantaneousFuture()
def _scheduleStream(self, stream):
""" Add a stream to be managed by the update scheduler.
stream (Stream): the stream to add. If it's already scheduled, it's fine.
"""
# create an adapted subscriber for the scheduler
def detectUpdate(updated, stream=stream):
self._onStreamUpdate(stream, updated)
self._scheduler_subscriptions[stream] = detectUpdate
stream.should_update.subscribe(detectUpdate)
def _unscheduleStream(self, stream):
"""
Remove a stream from being managed by the scheduler. It will also be
stopped from updating.
stream (Stream): the stream to remove. If it's not currently scheduled,
it's fine.
"""
stream.is_active.value = False
stream.should_update.value = False
if stream in self._scheduler_subscriptions:
callback = self._scheduler_subscriptions.pop(stream)
stream.should_update.unsubscribe(callback)
# TODO: shall we also have a suspend/resume streams that directly changes
# is_active, and used when the tab/window is hidden?
def enableStreams(self, enabled, classes=acqstream.Stream):
"""
Enable/disable the play/pause button of all the streams of the given class
enabled (boolean): True if the buttons should be enabled, False to
disable them.
classes (class or list of class): classes of streams that should be
disabled.
Returns (set of Stream): streams which were actually enabled/disabled
"""
streams = set() # stream changed
for e in self._stream_bar.stream_panels:
s = e.stream
if isinstance(s, classes):
streams.add(s)
e.enable_updated_btn(enabled)
return streams
def pauseStreams(self, classes=acqstream.Stream):
"""
Pause (deactivate and stop updating) all the streams of the given class
classes (class or list of class): classes of streams that should be
disabled.
Returns (set of Stream): streams which were actually paused
"""
streams = set() # stream paused
for s in self._tab_data_model.streams.value:
if isinstance(s, classes):
if s.should_update.value:
streams.add(s)
s.is_active.value = False
s.should_update.value = False
# TODO also disable stream panel "update" button?
return streams
def resumeStreams(self, streams):
"""
(Re)start (activate) streams
streams (set of streams): Streams that will be resumed
"""
for s in streams:
s.should_update.value = True
# it will be activated by the stream scheduler
def removeStreamPanel(self, stream):
"""
Remove the stream & its panel
"""
sp = next((sp for sp in self._stream_bar.stream_panels if sp.stream == stream), None)
if sp:
# Simulate clicking the remove stream button (will take care of removing the stream & panel)
sp.on_remove_btn(stream)
def removeStream(self, stream):
""" Removes the given stream
Args:
stream (Stream): the stream to remove
Note:
The stream panel is to be destroyed separately via the stream_bar.
It's ok to call if the stream has already been removed.
"""
# don't schedule any more
self._unscheduleStream(stream)
self._disconnectROI(stream)
# Remove from the views
for v in self._tab_data_model.views.value:
if hasattr(v, "removeStream"):
# logging.warn("> %s > %s", v, stream)
v.removeStream(stream)
# Remove from the list of streams
try:
self._tab_data_model.streams.value.remove(stream)
logging.debug("%s removed", stream)
except ValueError:
# Can happen, as all the tabs receive this event
logging.info("%s not found, so not removed", stream)
# Remove the corresponding stream controller
for sc in self.stream_controllers:
if sc.stream is stream:
self.stream_controllers.remove(sc)
break
else:
logging.info("Stream controller of %s not found", stream)
# Explicitly collect garbage, because for some reason not all stream controllers were
# collected immediately, which would keep a reference to the Stream object, which in turn
# would prevent the Stream render thread from terminating.
gc.collect()
def clear(self, clear_model=True):
"""
Remove all the streams (from the model and the GUI)
Must be called in the main GUI thread
:param clear_model: (bool) if True, streams will be removed from model
"""
# We could go for each stream panel, and call removeStream(), but it's
# as simple to reset all the lists
# clear the graphical part
self._stream_bar.clear()
# Remove from the views
for stream in self._tab_data_model.streams.value:
for v in self._tab_data_model.views.value:
if hasattr(v, "removeStream"):
v.removeStream(stream)
# clear the interface model
# (should handle cases where a new stream is added simultaneously)
if clear_model:
while self._tab_data_model.streams.value:
stream = self._tab_data_model.streams.value.pop()
self._unscheduleStream(stream)
self._disconnectROI(stream)
# Clear the stream controller
self.stream_controllers = []
gc.collect()
if self._has_streams() or self._has_visible_streams():
logging.warning("Failed to remove all streams")
def _has_streams(self):
return len(self._stream_bar.stream_panels) > 0
def _has_visible_streams(self):
return any(s.IsShown() for s in self._stream_bar.stream_panels)
# ROA synchronisation methods
# When the ROA is updated:
# 1. The ROA is copied to the "main" stream
# 2. The ROI of the main stream is copied back to the ROA (to give feedback)
# 3. The ROA is copied to the other streams
#
# When a ROI of a stream is changed (but not due to ROA change):
# 1. It's copied to the ROA
# 2. It's copied to the other streams
#
# Updating the ROI requires a bit of care, because the streams might
# update back their ROI with a modified value. To avoid loops, we disable
# and re-enable before and after each (direct) change.
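# Illustrative sequence (hypothetical values): the user drags the ROA to
# (0.1, 0.1, 0.9, 0.9). _onROA() copies it to the "main" (latest played)
# stream, whose setter may clip it to e.g. (0.1, 0.1, 0.9, 0.8); that
# clipped value is read back into the ROA and then pushed to the other
# repetition streams, with the subscriptions disabled to avoid loops.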
def _connectROI(self, stream):
"""
Connect the .roi of the (repetition) stream to the global ROA
"""
# First, start with the same ROI as the global ROA
stream.roi.value = self._tab_data_model.roa.value
self._tab_data_model.roa.subscribe(self._onROA)
listener = functools.partial(self._onStreamROI, stream)
stream.roi.subscribe(listener)
self._roi_listeners[stream] = listener
def _disconnectROI(self, stream):
"""
Remove ROI subscriptions for the stream.
It's fine to call for a stream which is not connected to ROI.
stream (Stream): the stream being removed
"""
if stream in self._roi_listeners:
logging.debug("Removing %s from ROI subscriptions", stream)
# Removing the callable from the roi_listeners should be sufficient,
# as the callable should be unreferenced and free'd, which should drop
# it from the subscriber... but let's make everything explicit.
stream.roi.unsubscribe(self._roi_listeners[stream])
del self._roi_listeners[stream]
def _disableROISub(self):
self._tab_data_model.roa.unsubscribe(self._onROA)
for s, listener in self._roi_listeners.items():
s.roi.unsubscribe(listener)
def _enableROISub(self):
self._tab_data_model.roa.subscribe(self._onROA)
for s, listener in self._roi_listeners.items():
s.roi.subscribe(listener)
def _onStreamROI(self, stream, roi):
"""
Called when the ROI of a stream is changed.
Used to update the global ROA.
stream (Stream): the stream which is changed
roi (4 floats): roi
"""
self._disableROISub()
try:
# Set the global ROA to the new ROI (defined by the user)
logging.debug("Setting ROA from %s to %s", stream.name.value, roi)
self._tab_data_model.roa.value = roi
# Update all the other streams to (almost) the same ROI too
for s in self._roi_listeners:
if s is not stream:
logging.debug("Setting ROI of %s to %s", s.name.value, roi)
s.roi.value = roi
finally:
self._enableROISub()
def _onROA(self, roa):
"""
Called when the ROA is changed
To synchronise global ROA -> streams ROI
"""
self._disableROISub()
try:
# ROI-related streams, in LRU order => the first one is the "main" stream
roi_ss = [s for s in self._tab_data_model.streams.value
if s in self._roi_listeners]
for i, s in enumerate(roi_ss):
logging.debug("Setting ROI of %s to %s", s.name.value, roa)
s.roi.value = roa
if i == 0:
# Read back the ROA from the "main" stream (= latest played)
logging.debug("Setting ROA back from %s to %s",
s.name.value, s.roi.value)
roa = s.roi.value
self._tab_data_model.roa.value = roa
finally:
self._enableROISub()
def show_roa_repetition(self, stream, rep, style=None):
"""
Request the views to show (or not) the repetition for the given stream
Depending on requests from other streams, it might or might not actually be done
stream (Stream): stream for which the display is requested
rep (None or tuple of 2 ints): if None, repetition is hidden
style (overlay.FILL_*): type of repetition display
"""
# Update the list of streams that want to show their repetition
if rep:
self._show_reps[stream] = (rep, style)
else:
self._show_reps.pop(stream, None) # remove iff present
# Pick the best repetition: use the latest stream which has something to show
rep, style = None, None # default is "no show"
for s in self._tab_data_model.streams.value:
if s in self._show_reps:
rep, style = self._show_reps[s]
break
if not self._view_controller:
# Too bad, but this can happen if the GUI has no viewport controller
# (ie, the AcquisitionDialog)
logging.info("Can not show repetition, as view controller is unknown")
return
# Update all the views which care (ie, spatial/SEM/Optical view)
# TODO: instead, look at each canvas which has a allowed_modes TOOL_ROA?
views = self._tab_data_model.visible_views.value
em_views = [v for v in views if (issubclass(acqstream.EMStream, v.stream_classes) or
issubclass(acqstream.OpticalStream, v.stream_classes))]
for em_view in em_views:
vp = self._view_controller.get_viewport_by_view(em_view)
vp.canvas.show_repetition(rep, style)
class SecomStreamsController(StreamBarController):
"""
Controls the streams for the SECOM and DELPHI live view
"""
def _createAddStreamActions(self):
""" Create the compatible "add stream" actions according to the current microscope.
To be executed only once, at initialisation.
"""
# Basically one action per type of stream
# TODO: always display the action (if it's compatible), but disable the
# play/pause button if the microscope state doesn't allow it (IOW if SEM
# or optical button is disabled)
# First: Fluorescent stream (for dyes)
if (
self._main_data_model.light and
self._main_data_model.light_filter and
self._main_data_model.ccd
):
def fluor_capable():
enabled = self._main_data_model.chamberState.value in {guimodel.CHAMBER_VACUUM,
guimodel.CHAMBER_UNKNOWN}
view = self._tab_data_model.focussedView.value
compatible = view.is_compatible(acqstream.FluoStream)
return enabled and compatible
# TODO: how to know it's a _fluorescent_ microscope?
# => multiple source? filter?
self.add_action("Filtered colour", self._userAddFluo, fluor_capable)
# Bright-field & Dark-field are almost identical but for the emitter
def brightfield_capable():
enabled = self._main_data_model.chamberState.value in {guimodel.CHAMBER_VACUUM,
guimodel.CHAMBER_UNKNOWN}
view = self._tab_data_model.focussedView.value
compatible = view.is_compatible(acqstream.BrightfieldStream)
return enabled and compatible
if self._main_data_model.brightlight and self._main_data_model.ccd:
self.add_action("Bright-field", self.addBrightfield, brightfield_capable)
if self._main_data_model.backlight and self._main_data_model.ccd:
self.add_action("Dark-field", self.addDarkfield, brightfield_capable)
def confocal_capable(detector):
enabled = self._main_data_model.chamberState.value in {guimodel.CHAMBER_VACUUM,
guimodel.CHAMBER_UNKNOWN}
# Only allow one stream with the detector at a time
present = any(s.detector is detector for s in self._tab_data_model.streams.value)
view = self._tab_data_model.focussedView.value
compatible = view.is_compatible(acqstream.FluoStream)
return enabled and compatible and not present
if self._main_data_model.laser_mirror:
ds = sorted(self._main_data_model.photo_ds, key=lambda d: d.name)
for pd in ds:
act = functools.partial(self.addConfocal, detector=pd)
cap = functools.partial(confocal_capable, detector=pd)
self.add_action("Confocal %s" % (pd.name,), act, cap)
def sem_capable():
""" Check if focussed view is compatible with a SEM stream """
enabled = self._main_data_model.chamberState.value in {guimodel.CHAMBER_VACUUM,
guimodel.CHAMBER_UNKNOWN}
view = self._tab_data_model.focussedView.value
compatible = view.is_compatible(acqstream.SEMStream)
return enabled and compatible
def flim_capable():
enabled = (self._main_data_model.time_correlator is not None)
view = self._tab_data_model.focussedView.value
compatible = view.is_compatible(acqstream.ScannedTCSettingsStream)
# Check if there is a FLIM stream already
flim_already = any(isinstance(s, acqstream.ScannedTCSettingsStream)
for s in self._tab_data_model.streams.value)
return enabled and compatible and not flim_already
# SED
if self._main_data_model.ebeam and self._main_data_model.sed:
self.add_action("Secondary electrons", self.addSEMSED, sem_capable)
# BSED
if self._main_data_model.ebeam and self._main_data_model.bsd:
self.add_action("Backscattered electrons", self.addSEMBSD, sem_capable)
# EBIC
if self._main_data_model.ebeam and self._main_data_model.ebic:
self.add_action("EBIC", self.addEBIC, sem_capable)
# FLIM
if self._main_data_model.time_correlator is not None:
self.add_action("FLIM", self.addScannedTCSettings, flim_capable)
def _onStreamUpdate(self, stream, updated):
# When a stream starts playing, ensure it's visible in at least one view
if updated:
fv = self._tab_data_model.focussedView.value
if stream not in fv.stream_tree.flat.value and stream is not self._spot_stream:
# if the stream is hidden in the current focused view, then unhide
# it everywhere
for v in self._tab_data_model.views.value:
if isinstance(stream, acqstream.SEMStream) and (not v.is_compatible(acqstream.SEMStream)):
continue
elif isinstance(stream, acqstream.FluoStream) and (not v.is_compatible(acqstream.FluoStream)):
continue
else:
if stream not in v.stream_tree.flat.value:
# make sure we don't display old data
str_img = stream.image.value
if ((str_img is not None) and
(str_img.metadata.get(model.MD_POS, (0, 0)) != self._main_data_model.stage)):
stream.image.value = None
v.addStream(stream)
super(SecomStreamsController, self)._onStreamUpdate(stream, updated)
self._updateMicroscopeStates()
def _updateMicroscopeStates(self):
"""
Update the SEM/optical states based on the stream currently playing
"""
streams = set() # streams currently playing
for s in self._tab_data_model.streams.value:
if s.should_update.value:
streams.add(s)
# optical state = at least one stream playing is optical
if hasattr(self._tab_data_model, 'opticalState'):
if any(isinstance(s, acqstream.OpticalStream) for s in streams):
self._tab_data_model.opticalState.value = guimodel.STATE_ON
else:
self._tab_data_model.opticalState.value = guimodel.STATE_OFF
# sem state = at least one stream playing is sem
if hasattr(self._tab_data_model, 'emState'):
if any(isinstance(s, acqstream.EMStream) for s in streams):
self._tab_data_model.emState.value = guimodel.STATE_ON
else:
self._tab_data_model.emState.value = guimodel.STATE_OFF
class SparcStreamsController(StreamBarController):
"""
Controls the streams for the SPARC acquisition tab
In addition to the standard controller it:
* Knows how to create the special RepetitionStreams
* Updates the .acquisitionStreams when a stream is added/removed
* Connects tab_data.useScanStage to the streams
Note: tab_data.spotStream should be in tab_data.streams
"""
def __init__(self, tab_data, *args, **kwargs):
super(SparcStreamsController, self).__init__(tab_data, *args, **kwargs)
# Each stream will be created both as a SettingsStream and a MDStream
# When the SettingsStream is deleted, automatically remove the MDStream
tab_data.streams.subscribe(self._on_streams)
# Connect the global useScanStage VA to each RepStream
tab_data.useScanStage.subscribe(self._updateScanStage)
def _createAddStreamActions(self):
""" Create the compatible "add stream" actions according to the current microscope.
To be executed only once, at initialisation.
"""
main_data = self._main_data_model
# Basically one action per type of stream
if main_data.ebic:
self.add_action("EBIC", self.addEBIC)
if main_data.cld:
self.add_action("CL intensity", self.addCLIntensity)
# TODO: support every component in .ccds
if main_data.ccd and main_data.lens and model.hasVA(main_data.lens, "polePosition"):
# Some simple SPARC have a CCD which can only do rough chamber view,
# but no actual AR acquisition. This is indicated by not having any
# polePosition VA on the optical path.
self.add_action("Angle-resolved", self.addAR)
# On the SPARCv2, there is potentially 4 different ways to acquire a
# spectrum: two spectrographs, each with two ports.
for sptm in main_data.spectrometers:
if len(main_data.spectrometers) == 1:
actname = "Spectrum"
else:
actname = "Spectrum with %s" % (sptm.name,)
act = functools.partial(self.addSpectrum, name=actname, detector=sptm)
self.add_action(actname, act)
if main_data.streak_ccd:
self.add_action("Temporal spectrum", self.addTemporalSpectrum)
if main_data.monochromator:
self.add_action("Monochromator", self.addMonochromator)
if main_data.time_correlator:
self.add_action("Time Correlator", self.addTimeCorrelator)
def _on_streams(self, streams):
""" Remove MD streams from the acquisition view that have one or more sub streams missing
Also remove the ROI subscriptions and wx events.
Args:
streams (list of streams): The streams currently used in this tab
"""
semcls = self._tab_data_model.semStream
# Clean-up the acquisition streams
for acqs in self._tab_data_model.acquisitionStreams.copy():
if not isinstance(acqs, acqstream.MultipleDetectorStream):
if acqs not in streams:
logging.debug("Removing stream %s from acquisition too",
acqs.name.value)
self._tab_data_model.acquisitionStreams.discard(acqs)
else:
# Are all the sub streams of the MDStreams still there?
for ss in acqs.streams:
# If not, remove the MD stream
if ss is not semcls and ss not in streams:
if isinstance(ss, acqstream.SEMStream):
logging.warning("Removing stream because %s is gone!", ss)
logging.debug("Removing acquisition stream %s because %s is gone",
acqs.name.value, ss.name.value)
self._tab_data_model.acquisitionStreams.discard(acqs)
break
def _getAffectingSpectrograph(self, comp):
"""
Find which spectrograph matters for the given component (ex, spectrometer)
comp (Component): the hardware which is affected by a spectrograph
return (None or Component): the spectrograph affecting the component
"""
cname = comp.name
main_data = self._main_data_model
for spg in (main_data.spectrograph, main_data.spectrograph_ded):
if spg is not None and cname in spg.affects.value:
return spg
else:
logging.warning("No spectrograph found affecting component %s", cname)
# spg should be None, but in case it's an error in the microscope file
# and actually, there is a spectrograph, then use that one
return main_data.spectrograph
def addEBIC(self, **kwargs):
# Need to use add_to_view=True to force only showing on the right
# view (and not on the current view)
# TODO: should it be handled the same way as CLIntensity? (ie, respects
# the ROA)
return super(SparcStreamsController, self).addEBIC(add_to_view=True, **kwargs)
def _add_sem_stream(self, name, detector, **kwargs):
# Only put some local VAs, the rest should be global on the SE stream
emtvas = get_local_vas(self._main_data_model.ebeam, self._main_data_model.hw_settings_config)
emtvas &= {"resolution", "dwellTime", "scale"}
s = acqstream.SEMStream(
name,
detector,
detector.data,
self._main_data_model.ebeam,
focuser=self._main_data_model.ebeam_focus,
emtvas=emtvas,
detvas=get_local_vas(detector, self._main_data_model.hw_settings_config),
)
# If the detector already handles brightness and contrast, don't do it by default
# TODO: check if it has .applyAutoContrast() instead (once it's possible)
if (s.intensityRange.range == ((0, 0), (255, 255)) and
model.hasVA(detector, "contrast") and
model.hasVA(detector, "brightness")):
s.auto_bc.value = False
s.intensityRange.value = (0, 255)
# add the stream to the acquisition set
self._tab_data_model.acquisitionStreams.add(s)
return self._add_stream(s, **kwargs)
def _filter_axes(self, axes):
"""
Given an axes dict from config, filter out the axes which are not
available on the current hardware.
axes (dict str -> (str, Actuator or None)): VA name -> axis+Actuator
returns (dict): the filtered axes
"""
return {va_name: (axis_name, comp)
for va_name, (axis_name, comp) in axes.items()
if comp and axis_name in comp.axes}
def _set_default_spectrum_axes(self, stream):
"""
Try to guess good default values for a spectrum stream's axes
"""
if hasattr(stream, "axisGrating") and hasattr(stream.axisGrating, "choices"):
# Anything *but* mirror is fine
choices = stream.axisGrating.choices
# Locate the mirror entry
mirror = None
if isinstance(choices, dict):
for pos, desc in choices.items():
if "mirror" in desc.lower(): # poor's man definition of a mirror
mirror = pos
break
if mirror is not None and stream.axisGrating.value == mirror:
# Pick the first entry which is not a mirror
for pos in choices:
if pos != mirror:
stream.axisGrating.value = pos
logging.debug("Picking grating %d for spectrum stream", pos)
break
if hasattr(stream, "axisWavelength"):
# Wavelength should be > 0
if stream.axisWavelength.value == 0:
# 600 nm ought to be good for every stream...
# TODO: pick based on the grating's blaze
stream.axisWavelength.value = stream.axisWavelength.clip(600e-9)
if hasattr(stream, "axisFilter") and hasattr(stream.axisFilter, "choices"):
# Use pass-through if available
choices = stream.axisFilter.choices
if isinstance(choices, dict):
for pos, desc in choices.items():
if desc == model.BAND_PASS_THROUGH:
stream.axisFilter.value = pos
logging.debug("Picking pass-through filter (%d) for spectrum stream", pos)
break
def _addRepStream(self, stream, mdstream, **kwargs):
"""
Display and connect a new RepetitionStream to the GUI
stream (RepetitionStream): freshly baked stream
mdstream (MDStream): corresponding new stream for acquisition
axes (dict axis name -> Component): axis entries to create
kwargs (dict): to be passed to _add_stream()
return (StreamController): the new stream controller
"""
if model.hasVA(stream, "useScanStage"):
stream.useScanStage.value = self._tab_data_model.useScanStage.value
stream_cont = self._add_stream(stream, add_to_view=True, **kwargs)
stream_cont.stream_panel.show_visible_btn(False)
# add the acquisition stream to the acquisition set
self._tab_data_model.acquisitionStreams.add(mdstream)
return stream_cont
def addAR(self):
""" Create a camera stream and add to to all compatible viewports """
main_data = self._main_data_model
detvas = get_local_vas(main_data.ccd, self._main_data_model.hw_settings_config)
if main_data.ccd.exposureTime.range[1] < 3600: # 1h
# remove exposureTime from local (GUI) VAs to use a new one, which allows to integrate images
detvas.remove("exposureTime")
axes = self._filter_axes({"filter": ("band", main_data.light_filter)})
ar_stream = acqstream.ARSettingsStream(
"Angle-resolved",
main_data.ccd,
main_data.ccd.data,
main_data.ebeam,
main_data.pol_analyzer,
sstage=main_data.scan_stage,
opm=self._main_data_model.opm,
axis_map=axes,
# TODO: add a focuser for the SPARCv2?
detvas=detvas,
)
# Make sure the binning is not crazy (especially can happen if CCD is shared for spectrometry)
if model.hasVA(ar_stream, "detBinning"):
b = ar_stream.detBinning.value
if b[0] != b[1] or b[0] > 16:
ar_stream.detBinning.value = ar_stream.detBinning.clip((1, 1))
ar_stream.detResolution.value = ar_stream.detResolution.range[1]
# Create the equivalent MDStream
sem_stream = self._tab_data_model.semStream
sem_ar_stream = acqstream.SEMARMDStream("SEM AR",
[sem_stream, ar_stream])
return self._addRepStream(ar_stream, sem_ar_stream)
def addCLIntensity(self):
""" Create a CLi stream and add to to all compatible viewports """
main_data = self._main_data_model
axes = {"density": ("density", main_data.tc_od_filter)}
# Need to pick the right filter wheel (if there is one)
for fw in (main_data.cl_filter, main_data.light_filter, main_data.tc_filter):
if fw is None:
continue
if main_data.cld.name in fw.affects.value:
axes["filter"] = ("band", fw)
break
axes = self._filter_axes(axes)
cli_stream = acqstream.CLSettingsStream(
"CL intensity",
main_data.cld,
main_data.cld.data,
main_data.ebeam,
sstage=main_data.scan_stage,
focuser=self._main_data_model.ebeam_focus,
opm=self._main_data_model.opm,
axis_map=axes,
emtvas={"dwellTime"},
detvas=get_local_vas(main_data.cld, self._main_data_model.hw_settings_config),
)
# Special "safety" feature to avoid having a too high gain at start
if hasattr(cli_stream, "detGain"):
cli_stream.detGain.value = cli_stream.detGain.range[0]
# Create the equivalent MDStream
sem_stream = self._tab_data_model.semStream
sem_cli_stream = acqstream.SEMMDStream("SEM CLi",
[sem_stream, cli_stream])
ret = self._addRepStream(cli_stream, sem_cli_stream,
play=False
)
# With CLi, often the user wants to get the whole area, same as the survey.
# But it's not very easy to select all of it, so do it automatically.
# (after the controller creation, to automatically set the ROA too)
if cli_stream.roi.value == acqstream.UNDEFINED_ROI:
cli_stream.roi.value = (0, 0, 1, 1)
return ret
def addSpectrum(self, name=None, detector=None):
"""
        Create a Spectrum stream and add it to all compatible viewports
name (str or None): name of the stream to be created
detector (Detector or None): the spectrometer to use. If None, it will
use the one with "spectrometer" as role.
"""
main_data = self._main_data_model
if name is None:
name = "Spectrum"
if detector is None:
detector = main_data.spectrometer
logging.debug("Adding spectrum stream for %s", detector.name)
spg = self._getAffectingSpectrograph(detector)
axes = {"wavelength": ("wavelength", spg),
"grating": ("grating", spg),
"slit-in": ("slit-in", spg),
}
axes = self._filter_axes(axes)
# Also add light filter for the spectrum stream if it affects the detector
for fw in (main_data.cl_filter, main_data.light_filter):
if fw is None:
continue
if detector.name in fw.affects.value:
axes["filter"] = ("band", fw)
break
spec_stream = acqstream.SpectrumSettingsStream(
name,
detector,
detector.data,
main_data.ebeam,
sstage=main_data.scan_stage,
opm=self._main_data_model.opm,
axis_map=axes,
# emtvas=get_local_vas(main_data.ebeam, self._main_data_model.hw_settings_config), # no need
detvas=get_local_vas(detector, self._main_data_model.hw_settings_config),
)
self._set_default_spectrum_axes(spec_stream)
# Create the equivalent MDStream
sem_stream = self._tab_data_model.semStream
sem_spec_stream = acqstream.SEMSpectrumMDStream("SEM " + name,
[sem_stream, spec_stream])
return self._addRepStream(spec_stream, sem_spec_stream)
def addTemporalSpectrum(self):
"""
        Create a temporal spectrum stream and add it to all compatible viewports
"""
main_data = self._main_data_model
detvas = get_local_vas(main_data.streak_ccd, self._main_data_model.hw_settings_config)
if main_data.streak_ccd.exposureTime.range[1] < 86400: # 24h
# remove exposureTime from local (GUI) VAs to use a new one, which allows to integrate images
detvas.remove("exposureTime")
spg = self._getAffectingSpectrograph(main_data.streak_ccd)
axes = {"wavelength": ("wavelength", spg),
"grating": ("grating", spg),
"slit-in": ("slit-in", spg)}
axes = self._filter_axes(axes)
# Also add light filter for the spectrum stream if it affects the detector
for fw in (main_data.cl_filter, main_data.light_filter):
if fw is None:
continue
if main_data.streak_ccd.name in fw.affects.value:
axes["filter"] = ("band", fw)
break
ts_stream = acqstream.TemporalSpectrumSettingsStream(
"Temporal Spectrum",
main_data.streak_ccd,
main_data.streak_ccd.data,
main_data.ebeam,
main_data.streak_unit,
main_data.streak_delay,
sstage=main_data.scan_stage,
opm=self._main_data_model.opm,
axis_map=axes,
detvas=detvas,
streak_unit_vas=get_local_vas(main_data.streak_unit, self._main_data_model.hw_settings_config))
self._set_default_spectrum_axes(ts_stream)
# Create the equivalent MDStream
sem_stream = self._tab_data_model.semStream
sem_ts_stream = acqstream.SEMTemporalSpectrumMDStream("SEM TempSpec", [sem_stream, ts_stream])
return self._addRepStream(ts_stream, sem_ts_stream)
def addMonochromator(self):
""" Create a Monochromator stream and add to to all compatible viewports """
main_data = self._main_data_model
spg = self._getAffectingSpectrograph(main_data.spectrometer)
axes = {"wavelength": ("wavelength", spg),
"grating": ("grating", spg),
"slit-in": ("slit-in", spg),
"slit-monochromator": ("slit-monochromator", spg),
}
axes = self._filter_axes(axes)
# Also add light filter if it affects the detector
for fw in (main_data.cl_filter, main_data.light_filter):
if fw is None:
continue
if main_data.monochromator.name in fw.affects.value:
axes["filter"] = ("band", fw)
break
monoch_stream = acqstream.MonochromatorSettingsStream(
"Monochromator",
main_data.monochromator,
main_data.monochromator.data,
main_data.ebeam,
sstage=main_data.scan_stage,
opm=self._main_data_model.opm,
axis_map=axes,
emtvas={"dwellTime"},
detvas=get_local_vas(main_data.monochromator, self._main_data_model.hw_settings_config),
)
self._set_default_spectrum_axes(monoch_stream)
# Create the equivalent MDStream
sem_stream = self._tab_data_model.semStream
sem_monoch_stream = acqstream.SEMMDStream("SEM Monochromator",
[sem_stream, monoch_stream])
return self._addRepStream(monoch_stream, sem_monoch_stream,
play=False
)
def addTimeCorrelator(self):
""" Create a Time Correlator stream and add to to all compatible viewports """
main_data = self._main_data_model
axes = {"density": ("density", main_data.tc_od_filter),
"filter": ("band", main_data.tc_filter)}
axes = self._filter_axes(axes)
tc_stream = acqstream.ScannedTemporalSettingsStream(
"Time Correlator",
main_data.time_correlator,
main_data.time_correlator.data,
main_data.ebeam,
opm=self._main_data_model.opm,
axis_map=axes,
detvas=get_local_vas(main_data.time_correlator, self._main_data_model.hw_settings_config)
)
# Create the equivalent MDStream
sem_stream = self._tab_data_model.semStream
sem_tc_stream = acqstream.SEMTemporalMDStream("SEM Time Correlator",
[sem_stream, tc_stream])
return self._addRepStream(tc_stream, sem_tc_stream,
play=False
)
def _onStreamUpdate(self, stream, updated):
# Make sure that the stream is visible in every (compatible) view
if updated:
fv = self._tab_data_model.focussedView.value
if (isinstance(stream, fv.stream_classes) and # view is compatible
stream not in fv.stream_tree):
# Add to the view
fv.addStream(stream)
# Update the graphical display
for e in self._stream_bar.stream_panels:
if e.stream is stream:
e.set_visible(True)
super(SparcStreamsController, self)._onStreamUpdate(stream, updated)
# Make sure the current view is compatible with the stream playing
if updated:
self._view_controller.focusViewWithStream(stream)
def _updateScanStage(self, use):
"""
Updates the useScanStage VAs of each RepStream based on the global
useScanStage VA of the tab.
"""
for s in self._tab_data_model.streams.value:
if model.hasVA(s, "useScanStage"):
s.useScanStage.value = use
class FastEMStreamsController(StreamBarController):
"""
StreamBarController with additional functionality for overview streams (add/remove overview streams from
view if main data .overview_streams VA changes).
"""
def __init__(self, tab_data, *args, **kwargs):
super().__init__(tab_data, *args, **kwargs)
tab_data.main.overview_streams.subscribe(self._on_overview_streams)
def _on_overview_streams(self, _):
ovv_streams = self._tab_data_model.main.overview_streams.value.values()
tab_streams = self._tab_data_model.streams.value
canvas = self._view_controller.viewports[0].canvas
# Remove old streams from view
for s in tab_streams:
if isinstance(s, FastEMOverviewStream) and s not in ovv_streams:
tab_streams.remove(s)
canvas.view.removeStream(s)
# Add stream to view if it's not already there
for s in ovv_streams:
if isinstance(s, FastEMOverviewStream) and s not in tab_streams:
tab_streams.append(s)
canvas.view.addStream(s)
# Blue, green, cyan, yellow, purple, magenta, red
FASTEM_PROJECT_COLOURS = ["#0000ff", "#00ff00", "#00ffff", "#ffff00", "#ff00ff",
"#ff00bf", "#ff0000"]
class FastEMProjectBarController(object):
"""
Creates/removes new the FastEM projects.
"""
def __init__(self, tab_data, project_bar, view_ctrl):
"""
project_bar (FastEMProjectBar): top-level panel containing all project panels
tab_data (FastEMAcquisitionGUIData): tab data model
view_ctrl (FastEMAcquisitionViewport): viewport controller
"""
self._tab_data_model = tab_data
self._main_data_model = tab_data.main
self._project_bar = project_bar
self._view_ctrl = view_ctrl
self.project_ctrls = {} # dict int --> FastEMProjectController
self._project_bar.btn_add_project.Bind(wx.EVT_BUTTON, self._add_project)
# Always show one project by default
self._add_project(None)
def _add_project(self, _):
        # Pick the smallest project number not already in use. Projects can be
        # deleted, so numbering may have gaps: e.g. if project 1 (blue) was deleted
        # while project 2 (red) still exists, the next project should get number 1
        # (and thus blue) rather than reusing red.
        num = next(idx for idx, existing in enumerate(sorted(self.project_ctrls.keys()) + [0], 1) if idx != existing)
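        # Worked example (editor's note): with existing numbers {1, 3},
        # enumerate(sorted(keys) + [0], 1) gives (1, 1), (2, 3), (3, 0); the first
        # pair whose index differs from the stored number is (2, 3), so the new
        # project becomes number 2. With {1, 2} the pick is 3.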
name = "Project-%s" % num
logging.debug("Creating new project %s.", name)
colour = FASTEM_PROJECT_COLOURS[(num - 1) % len(FASTEM_PROJECT_COLOURS)]
project_ctrl = FastEMProjectController(name, colour, self._tab_data_model, self._project_bar, self._view_ctrl)
# Add the project model to tab_data
self.project_ctrls[num] = project_ctrl
self._tab_data_model.projects.value.append(project_ctrl.model)
        # Bind the remove callback for each newly created remove button
project_ctrl.panel.btn_remove.Bind(wx.EVT_BUTTON, lambda evt: self._remove_project(evt, project_ctrl))
def _remove_project(self, _, project_ctrl):
# TODO: open dialog "Are you sure?"
logging.debug("Removing project %s." % project_ctrl.model.name.value)
# Delete all ROIs of the project
# .remove_roa_ctrl automatically removes itself from .roa_ctrls, so a for-loop doesn't work
while project_ctrl.roa_ctrls:
project_ctrl.remove_roa_ctrl(next(iter(project_ctrl.roa_ctrls.values())))
# Remove panel
self._project_bar.remove_project_panel(project_ctrl.panel)
# Remove controller from .project_ctrls list
self.project_ctrls = {key: val for key, val in self.project_ctrls.items() if val != project_ctrl}
# Remove model
self._tab_data_model.projects.value.remove(project_ctrl.model)
# Destroy ROAController object
del project_ctrl
class FastEMProjectController(object):
"""
Controller for a FastEM project. This class is responsible for the creation and maintenance
of ROIs belonging to the project.
During initialization, a panel is created and added to the project bar.
"""
def __init__(self, name, colour, tab_data, project_bar, view_ctrl):
"""
name (str): default name for the project
colour (str): hexadecimal colour code for the bounding box of the roas in the viewport
tab_data (FastEMAcquisitionGUIData): tab data model
project_bar (FastEMProjectBar): top-level panel containing all project panels
view_ctrl (FastEMAcquisitionViewport): viewport controller
"""
self._tab_data = tab_data
self._project_bar = project_bar
self._view_ctrl = view_ctrl
self.roa_ctrls = {} # dict int --> FastEMROAController
self.colour = colour
self.model = guimodel.FastEMProject(name)
# Create the panel and add it to the project bar. Subscribe to controls.
self.panel = FastEMProjectPanel(project_bar, name=name)
project_bar.add_project_panel(self.panel)
self.panel.btn_add_roa.Bind(wx.EVT_BUTTON, self._on_btn_roa)
# Listen to both enter and kill focus event to make sure the text is really updated
self.panel.txt_ctrl.Bind(wx.EVT_KILL_FOCUS, self._on_text)
self.panel.txt_ctrl.Bind(wx.EVT_TEXT_ENTER, self._on_text)
# For ROA creation process
self._current_roa_ctrl = None
def _on_text(self, evt):
txt = self.panel.txt_ctrl.GetValue()
current_name = self.model.name.value
if txt == "":
txt = current_name
self.panel.txt_ctrl.SetValue(txt)
if txt != current_name:
txt = make_unique_name(txt, [project.name.value for project in self._tab_data.projects.value])
logging.debug("Renaming project from %s to %s.", self.model.name.value, txt)
self.model.name.value = txt
self.panel.txt_ctrl.SetValue(txt)
evt.Skip()
def _on_btn_roa(self, _):
# Two-step process: Instantiate FastEM object here, but wait until first ROI is selected until
# further processing. The process can still be aborted by clicking in the viewport without dragging.
# In the callback to the ROI, the ROI creation will be completed or aborted.
self._project_bar.enable_buttons(False)
# Deactivate all ROAs
for roa_ctrl in self.roa_ctrls.values():
roa_ctrl.overlay.active.value = False
        # Find the smallest ROA index not already in use (indices freed by deleted ROAs are reused)
num = next(idx for idx, n in enumerate(sorted(self.roa_ctrls.keys()) + [0], 1) if idx != n)
name = "ROA-%s" % num
name = make_unique_name(name, [roa.name.value for roa in self.model.roas.value])
# better guess for parameters after region is selected in _add_roa_ctrl
roa_ctrl = FastEMROAController(name, None, self.colour, self._tab_data, self.panel, self._view_ctrl)
self.roa_ctrls[num] = roa_ctrl
self._current_roa_ctrl = roa_ctrl
roa_ctrl.model.coordinates.subscribe(self._add_roa_ctrl)
def _on_btn_remove(self, _, roa_ctrl):
self.remove_roa_ctrl(roa_ctrl)
def _add_roa_ctrl(self, coords):
roa_ctrl = self._current_roa_ctrl
self._current_roa_ctrl = None
roa_ctrl.model.coordinates.unsubscribe(self._add_roa_ctrl)
# Abort ROI creation if nothing was selected
if coords == acqstream.UNDEFINED_ROI:
logging.debug("Aborting ROA creation.")
self._view_ctrl.viewports[0].canvas.remove_roa_overlay(roa_ctrl.overlay)
self.roa_ctrls = {key: val for key, val in self.roa_ctrls.items() if val != roa_ctrl}
else:
# Create the panel
roa_ctrl.create_panel()
# Improve parameters guess
num = self._find_closest_scintillator(coords)
roa_ctrl.model.roc.value = self._tab_data.calibration_regions.value[num]
# Add the ROA model to project model
self.model.roas.value.append(roa_ctrl.model)
# Callback to ROI remove button
roa_ctrl.panel.btn_remove.Bind(wx.EVT_BUTTON, lambda evt: self._on_btn_remove(evt, roa_ctrl))
# Enable buttons of project bar
self._project_bar.enable_buttons(True)
# Should be called from GUI main thread
def remove_roa_ctrl(self, roa_ctrl):
# Public function, so it can officially be called from the projectbar controller
logging.debug("Removing ROA '%s' of project '%s'.", roa_ctrl.model.name.value, self.model.name.value)
# Remove panel
roa_ctrl.panel.Destroy()
self.panel.fit_panels()
# Remove controller from .roa_ctrl list
self.roa_ctrls = {key: val for key, val in self.roa_ctrls.items() if val != roa_ctrl}
# Remove overlay
self._view_ctrl.viewports[0].canvas.remove_roa_overlay(roa_ctrl.overlay)
# Remove model
self.model.roas.value.remove(roa_ctrl.model)
# Destroy ROAController object
del roa_ctrl
def _find_closest_scintillator(self, coordinates):
"""
Given coordinates coords, find the closest scintillator.
coordinates (float, float, float, float): l, t, r, b coordinates in m
return (int): name (key) of closest scintillator in ._tab_data.scintillator_positions dict
"""
roi_x, roi_y = (coordinates[2] + coordinates[0]) / 2, (coordinates[1] + coordinates[3]) / 2
        mindist = 1  # distances are always smaller than 1 (positions are in m)
closest = None
for num, (sc_x, sc_y) in self._tab_data.main.scintillator_positions.items():
# scintillators are rectangular, use maximum instead of euclidean distance
dist = max(abs(roi_x - sc_x), abs(roi_y - sc_y))
if dist < mindist:
mindist = dist
closest = num
return closest
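    # Editor's note (illustrative, made-up numbers): for an ROA centred at
    # (1e-3, 2e-3) m and scintillators at (0, 0) and (0, 3e-3), the distances are
    # max(1e-3, 2e-3) = 2e-3 and max(1e-3, 1e-3) = 1e-3, so the second one wins;
    # the Chebyshev metric suits the rectangular scintillator shape.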
class FastEMROAController(object):
"""
Controller for a single region of acquisition.
"""
def __init__(self, name, roc, colour, tab_data, project_panel, view_ctrl):
"""
name (str): default name for the ROA
roc (FastEMROC): region of calibration
colour (str): hexadecimal colour code for the bounding box of the roas in the viewport
tab_data (FastEMAcquisitionGUIData): tab data model
project_panel (FastEMProjectPanel): project panel
view_ctrl (FastEMAcquisitionViewport): viewport controller
"""
self._tab_data = tab_data
self._project_panel = project_panel
self._view_ctrl = view_ctrl
self.model = odemis.acq.fastem.FastEMROA(name, acqstream.UNDEFINED_ROI, roc,
self._tab_data.main.asm, self._tab_data.main.multibeam,
self._tab_data.main.descanner, self._tab_data.main.mppc)
self.model.coordinates.subscribe(self._on_coordinates)
self.model.roc.subscribe(self._on_roc)
# The panel is not created on initialization to allow for cancellation of the ROA creation
# (cf discussion in FastEMProjectController), create panel with .create_panel().
self.panel = None
logging.debug("Creating overlay for ROA '%s'.", name)
canvas = self._view_ctrl.viewports[0].canvas
self.overlay = canvas.add_roa_overlay(self.model.coordinates, colour)
self.overlay.active.subscribe(self._on_overlay_active)
def create_panel(self):
"""
Create a panel, add it to the project panel and subscribe to controls. Should only be called once.
"""
logging.debug("Creating panel for ROA %s.", self.model.name.value)
self.panel = FastEMROAPanel(self._project_panel, self.model.name.value,
["Calibration %s" % c for c in sorted(self._tab_data.main.scintillator_positions)])
self._project_panel.add_roa_panel(self.panel)
self.panel.calibration_ctrl.Bind(wx.EVT_COMBOBOX, self._on_combobox)
self.panel.txt_ctrl.Bind(wx.EVT_KILL_FOCUS, self._on_text)
self.panel.txt_ctrl.Bind(wx.EVT_TEXT_ENTER, self._on_text)
def _on_overlay_active(self, active):
if self.panel:
if active:
logging.debug("Activating ROA '%s'.", self.model.name.value)
self.panel.activate()
else:
logging.debug("Deactivating ROA '%s'.", self.model.name.value)
self.panel.deactivate()
def _on_coordinates(self, coords):
# Purely for logging
logging.debug("ROA '%s' coordinates changed to %s.", self.model.name.value, coords)
def _on_roc(self, roc):
# Update calibration control
logging.debug("ROA calibration changed to %s.", roc.name.value)
if self.panel:
self.panel.calibration_ctrl.SetSelection(int(roc.name.value) - 1) # counting starts at 0
def _on_combobox(self, _):
num = self.panel.calibration_ctrl.GetSelection() + 1
self.model.roc.value = self._tab_data.calibration_regions.value[num]
logging.debug("ROA calibration changed to %s.", self.model.roc.value.name.value)
def _on_text(self, evt):
txt = self.panel.txt_ctrl.GetValue()
current_name = self.model.name.value
# Process input, make sure name is unique in project and complies with whitelisted characters of
# technolution driver
roa_project = [p for p in self._tab_data.projects.value if self.model in p.roas.value][0]
all_project_roas = [roa.name.value for roa in roa_project.roas.value]
if txt == "":
txt = current_name
self.panel.txt_ctrl.SetValue(txt)
if txt != current_name:
txt = make_unique_name(txt, all_project_roas)
logging.debug("Renaming ROA from %s to %s.", self.model.name.value, txt)
self.model.name.value = txt
self.panel.txt_ctrl.SetValue(txt)
evt.Skip()
class FastEMCalibrationController(object):
"""
Listens to the calibration buttons and creates the FastEMROCControllers accordingly.
"""
def __init__(self, tab_data, calibration_bar, view_ctrl):
"""
tab_data (FastEMAcquisitionGUIData): tab data model
calibration_bar (FastEMCalibrationBar): main calibration panel
view_ctrl (FastEMAcquisitionViewport): viewport controller
"""
self._view_ctrl = view_ctrl
self._calibration_bar = calibration_bar
self._tab_data = tab_data
self.panel = FastEMCalibrationPanel(calibration_bar, tab_data.main.scintillator_layout)
calibration_bar.add_calibration_panel(self.panel)
self.roc_ctrls = {} # int --> FastEMROCController
for btn in self.panel.buttons.values():
btn.Bind(wx.EVT_BUTTON, self._on_button)
btn.Enable(False) # disabled by default, need to select scintillator in chamber tab first
# Only enable buttons for scintillators which have been selected in the chamber tab
tab_data.main.active_scintillators.subscribe(self._on_active_scintillators)
def _on_button(self, evt):
btn = evt.GetEventObject()
btn.SetLabel("OK")
btn.SetForegroundColour(wx.GREEN)
num = [num for num, b in self.panel.buttons.items() if b == btn][0]
if num not in self.roc_ctrls:
# Create new calibration controller
logging.debug("Adding ROC controller %s.", num)
roc_ctrl = FastEMROCController(num, self._tab_data, self._view_ctrl)
self.roc_ctrls[num] = roc_ctrl
else:
# Zoom to existing calibration region
roc_ctrl = self.roc_ctrls[num]
roc_ctrl.fit_view_to_bbox()
@call_in_wx_main
def _on_active_scintillators(self, scintillators):
for num, b in self.panel.buttons.items():
if num in scintillators:
b.Enable(True)
else:
b.Enable(False)
class FastEMROCController(object):
"""
Controller for a single region of calibration.
"""
def __init__(self, number, tab_data, view_ctrl):
"""
number (int): number of the calibration region
tab_data (FastEMAcquisitionGUIData): tab data model
view_ctrl (FastEMAcquisitionViewport): viewport controller
"""
self._view_ctrl = view_ctrl
self._tab_data = tab_data
# By default, the calibration region is in the center of the scintillator. The size is given by
# the ebeam resolution.
pos = tab_data.main.scintillator_positions[number]
sz = (tab_data.main.ebeam.resolution.value[0] * tab_data.main.ebeam.pixelSize.value[0],
tab_data.main.ebeam.resolution.value[1] * tab_data.main.ebeam.pixelSize.value[1])
l = pos[0] - 0.5 * sz[0]
t = pos[1] + 0.5 * sz[1]
r = pos[0] + 0.5 * sz[0]
b = pos[1] - 0.5 * sz[1]
# Get ROC model (exists already in tab data) and change coordinates
self.model = tab_data.calibration_regions.value[number]
self.model.coordinates.value = (l, t, r, b)
self.model.coordinates.subscribe(self._on_coordinates)
# Add overlay
self.overlay = view_ctrl.viewports[0].canvas.add_calibration_overlay(self.model.coordinates, number)
def fit_view_to_bbox(self):
"""
        Zoom in to the calibration region: it ends up centred in the view and takes up roughly 1/100 of the viewport area.
"""
logging.debug("Zooming in to calibration region %s.", self.model.name.value)
cnvs = self._view_ctrl.viewports[0].canvas
l, t, r, b = self.model.coordinates.value
sz = (self._tab_data.main.ebeam.resolution.value[0] * self._tab_data.main.ebeam.pixelSize.value[0],
self._tab_data.main.ebeam.resolution.value[1] * self._tab_data.main.ebeam.pixelSize.value[1])
cnvs.fit_to_bbox([l - 5 * sz[0], t - 5 * sz[1], r + 5 * sz[0], b + 5 * sz[1]])
def _on_coordinates(self, coords):
# Purely for logging
logging.debug("ROC '%s' coordinates changed to %s.", self.model.name.value, coords)
|
delmic/odemis
|
src/odemis/gui/cont/streams.py
|
Python
|
gpl-2.0
| 149,861
|
[
"Gaussian"
] |
84845e9045fedcc952ce7cf5f3c696507cb5aa0762b021b35ea9f7cfe3789e0d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package implements various diffraction analyses.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "5/22/14"
|
fraricci/pymatgen
|
pymatgen/analysis/diffraction/__init__.py
|
Python
|
mit
| 366
|
[
"pymatgen"
] |
cac77d46358f95ab3c9333c2a678061f7636a112a20cf0449e8841d05e93f61c
|
"""
PLY-based parser for Myfile grammar.
"""
from __future__ import absolute_import, division, print_function
from mybuild._compat import *
import functools
import itertools
import ply.yacc
from collections import namedtuple, OrderedDict
from mybuild.lang import lex, x_ast as ast
from mybuild.lang.helpers import rule
from mybuild.lang.location import Fileinfo, Location
from mybuild.util.operator import getter
__author__ = "Eldar Abusalimov"
__date__ = "2013-07-05"
# Runtime intrinsics and internal auxiliary names.
MY_NEW_TYPE = '__my_new_type__'
MY_NEW_NAMESPACE = '__my_new_namespace__'
MY_CALL_ARGS = '__my_call_args__'
MY_EXEC_MODULE = '__my_exec_module__'
DFL_TYPE_NAME = '_'
CLS_ARG = 'cls'
SELF_ARG = 'self'
_RESULT_TMP = '<tmp>'
_AUX_NAME_FMT = '<aux-{0}-{1}>'
_AUX_VAR_NAME_FMT = '<aux-{0}-{1}>'
_MODULE_EXEC = '<trampoline>'
_MODULE_NAME = '<module>'
# Location tracking.
def node_loc(ast_node, p):
return Location.from_ast_node(ast_node, p.lexer.fileinfo)
def ploc(p, i=1):
return Location(p.lexer.fileinfo, p.lineno(i), p.lexpos(i))
def set_loc(ast_node, loc):
return loc.init_ast_node(ast_node)
def set_loc_p(ast_node, p, i=1):
return set_loc(ast_node, ploc(p, i))
copy_loc = ast.copy_location
def wloc(func):
@functools.wraps(func)
def decorated(p, *symbols):
return func(p, *symbols), ploc(p)
return decorated
def rule_wloc(func):
return rule(wloc(func))
# AST fragments builders.
class Binding(namedtuple('Binding', 'qualname, name_locs, func, is_static')):
"""docstring for Binding"""
__slots__ = ()
def __str__(self):
return '.'.join(self.qualname)
def name_builder(name):
def builder(expr=None):
if expr is not None:
return ast.Attribute(expr, name, ast.Load())
else:
return ast.x_Name(name)
return builder
def build_node(builder_wloc, expr=None):
builder, loc = builder_wloc
if not callable(builder):
builder = name_builder(builder)
return set_loc(builder(expr) if expr is not None else builder(), loc)
def build_chain(builder_wlocs, expr=None):
for builder_wloc in builder_wlocs:
expr = build_node(builder_wloc, expr)
return expr
def groupby_name(bindings, tier=0):
"""
    Groups bindings by the name fragment specified by tier. Input bindings
    need not be sorted; within each group, bindings keep their original order.
    Returns:
        OrderedDict mapping each name fragment to its list of bindings.
"""
res = OrderedDict()
for binding in bindings:
name_at_tier = binding.qualname[tier]
if name_at_tier in res:
res[name_at_tier].append(binding)
else:
res[name_at_tier] = [binding]
return res
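# Editor's note (illustrative, hypothetical bindings): with qualnames
# ['hello', 'world'], ['hello', 'again'] and ['other'], groupby_name(bindings)
# returns OrderedDict([('hello', [b1, b2]), ('other', [b3])]) for tier=0,
# keeping b1 before b2 inside the 'hello' group.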
def build_namespace_recursive(bindings, tier):
keywords = []
assert bindings, "A group must not be empty"
if len(bindings[0].qualname) == tier:
if len(bindings) > 1:
loc = bindings[1].name_locs[-1]
raise MySyntaxError('Namespace element repeated', loc)
return bindings[0].func
for name, group in iteritems(groupby_name(bindings, tier)):
value_ast = build_namespace_recursive(group, tier+1)
loc = group[0].name_locs[tier]
keyword = set_loc(ast.keyword(name, value_ast), loc)
keywords.append(keyword)
return ast.x_Call(ast.x_Name(MY_NEW_NAMESPACE), keywords=keywords)
def assign_funcs_to_variables(bblock, bindings):
for binding in bindings:
var = bblock.new_aux_var_name(binding.qualname[-1])
value = ast.x_Call(binding.func, [ast.x_Name(SELF_ARG)])
transformer = AssigningTransformer(var)
stmt = transformer.transform_expr(value)
bblock.append(stmt)
yield Binding(binding.qualname, binding.name_locs,
ast.x_Name(var), binding.is_static)
def fold_into_namespace(p, bindings):
if len(bindings) == 1 and len(bindings[0].qualname) == 1:
return bindings[0].func
bblock = BuildingBlock(p.parser.bblock)
bindings = list(assign_funcs_to_variables(bblock, bindings))
stmt = ast.Expr(build_namespace_recursive(bindings, 1))
bblock.append(stmt)
return bblock.fold_into_binding()
def fold_bindings(p, bindings):
"""
    Folds bindings so that each name maps to a corresponding namespace.
Returns:
An AST structure: [(name, func, is_static)*].
"""
binding_asts = []
for name, group in iteritems(groupby_name(bindings)):
func = fold_into_namespace(p, group)
loc = group[0].name_locs[0]
name_str = set_loc(ast.Str(name), loc)
triple = [name_str, func, ast.x_Const(False)]
binding_asts.append(ast.Tuple(triple, ast.Load()))
return ast.List(binding_asts, ast.Load())
def build_typedef(p, body, metatype, qualname=None, call_builder=None):
# metatype { ... } ->
# __my_new_type__(metatype, '_', <module>, [...])
#
# metatype qualname { ... } ->
# __my_new_type__(metatype, 'qualname', <module>, [...])
#
# metatype qualname(...) { ... } ->
# __my_new_type__(metatype, 'qualname', <module>, [...],
# *__my_call_args__(...))
assert len(body) == 2, "body must be a tuple of (doc_str, bindings)"
if qualname is not None:
name = '.'.join(name for name, loc in qualname)
else:
name = DFL_TYPE_NAME
doc_str, bindings = body
binding_list = fold_bindings(p, bindings)
args = [metatype, ast.Str(name), ast.x_Name(_MODULE_NAME),
doc_str, binding_list]
starargs = None
if call_builder is not None:
starargs = build_node(call_builder, ast.x_Name(MY_CALL_ARGS))
        # ...but drop it again if the call carries no arguments:
if not (starargs.args or
starargs.keywords or
starargs.starargs or
starargs.kwargs):
starargs = None # optimize out
ret_call = ast.x_Call(ast.x_Name(MY_NEW_TYPE), args, starargs=starargs)
return copy_loc(ret_call, metatype)
# Dealing with statements.
class BuildingBlock(object):
"""Building Block encapsulates a sequence of statements."""
def __init__(self, parent=None):
super(BuildingBlock, self).__init__()
self.parent = parent
self.stmts = []
self.aux_cnt = 0
self.aux_var_cnt = 0
if parent is not None:
self.depth = parent.depth + 1
else:
self.depth = 0
@property
def docstring_stmt(self):
if (self.stmts and
isinstance(self.stmts[0], ast.Expr) and
isinstance(self.stmts[0].value, ast.Str)):
return self.stmts[0]
def insert(self, index, *stmts):
self.stmts[index:index] = stmts
def append(self, *stmts):
self.stmts.extend(stmts)
def make_returning(self):
ReturningTransformer().modify_stmts_list(self.stmts)
def make_assigning(self, name=_RESULT_TMP):
AssigningTransformer(name).modify_stmts_list(self.stmts)
return ast.x_Name(name)
def new_aux_var_name(self, name):
cnt = self.aux_var_cnt
self.aux_var_cnt = cnt + 1
return _AUX_VAR_NAME_FMT.format(name, cnt)
def new_aux_name(self):
cnt = self.aux_cnt
self.aux_cnt = cnt + 1
return _AUX_NAME_FMT.format(self.depth, cnt)
def build_func_from(self, stmts, arguments, name=None):
if name is None:
name = self.new_aux_name()
self.append(ast.x_FunctionDef(name, arguments, stmts))
return ast.x_Name(name)
def fold_into_func(self, arguments, name=None):
self.make_returning()
return self.parent.build_func_from(self.stmts, arguments, name)
def fold_into_binding(self, is_static=False):
args = [ast.x_arg(CLS_ARG if is_static else SELF_ARG)]
return self.fold_into_func(ast.x_arguments(args))
class ResultingTransformer(ast.NodeTransformer):
def modify_stmts_list(self, stmts):
if stmts:
value = self.visit(stmts.pop())
else:
value = self.create_noresult()
if isinstance(value, ast.AST):
stmts.append(value)
elif value is not None:
stmts.extend(value)
return stmts
def visit_FunctionDef(self, node):
raise ValueError('Unexpected FunctionDef as the last stmt of bblock')
def visit_Expr(self, node):
return copy_loc(self.transform_expr(node.value), node)
def visit_If(self, node):
for bblock in node.body, node.orelse:
self.modify_stmts_list(bblock)
return node
def visit_Return(self, node):
return node
def noresult_visit(self, node):
return [node] + self.modify_stmts_list([])
visit_Delete = noresult_visit
visit_Assign = noresult_visit
visit_AugAssign = noresult_visit
visit_Pass = noresult_visit
def create_noresult(self):
return self.transform_expr(ast.x_Const(None))
def transform_expr(self, expr):
raise NotImplementedError
class ReturningTransformer(ResultingTransformer):
def create_noresult(self):
# This is the last stmt anyway, 'return None' is implied.
return None
def transform_expr(self, expr):
return ast.Return(expr)
class AssigningTransformer(ResultingTransformer):
def __init__(self, name):
super(AssigningTransformer, self).__init__()
self.name = name
def transform_expr(self, expr):
return ast.Assign([ast.Name(self.name, ast.Store())], expr)
def emit_stmt(p, *stmts):
p.parser.bblock.append(*stmts)
def push_new_bblock(p):
p.parser.bblock = BuildingBlock(p.parser.bblock)
def pop_bblock(p):
bblock = p.parser.bblock
p.parser.bblock = bblock.parent
return bblock
# Here go grammar definitions for PLY.
tokens = lex.tokens
def p_new_bblock(p):
"""new_bblock :"""
push_new_bblock(p)
@rule
def p_exec_start(p, docstring_bindings=-1):
"""exec_start : new_bblock typesuite"""
# stmts... ->
#
# try:
# @__my_exec_module__
# def __suite():
# global __name__
# <module> = __name__
# ...
# return [...]
#
# except __my_exec_module__:
# pass
#
# N.B. This voodoo is to avoid storing __suite name into global module
# dict. Applied as a decorator, __my_exec_module__ executes a function
# being decorated (__suite in this case) and throws 'itself' instead
# of returning as normal.
# Likewise any auxiliary function is defined local to the __suite.
#
doc_str, bindings = docstring_bindings
binding_list = fold_bindings(p, bindings)
bblock = pop_bblock(p)
bblock.insert(0,
ast.Global(['__name__']),
ast.Assign([ast.Name(_MODULE_NAME, ast.Store())],
ast.x_Name('__name__')))
bblock.append(ast.Return(binding_list))
suite_func = ast.x_FunctionDef(_MODULE_EXEC, ast.x_arguments(),
bblock.stmts,
decos=[ast.x_Name(MY_EXEC_MODULE)])
eh_stmt = ast.ExceptHandler(ast.x_Name(MY_EXEC_MODULE), None, [ast.Pass()])
try_stmt = ast.x_TryExcept([suite_func], [eh_stmt])
module_body = [try_stmt]
if isinstance(doc_str, ast.Str):
module_body.insert(0, ast.Expr(doc_str))
return ast.Module(module_body)
@rule
def p_typebody(p, docstring_bindings=2, typeret_func=-1):
"""typebody : LBRACE typesuite RBRACE typeret"""
return docstring_bindings
@rule
def p_typeret(p):
"""typeret : """
return None # stub for further devel
@rule
def p_stmtexpr(p, value):
"""stmtexpr : test"""
emit_stmt(p, copy_loc(ast.Expr(value), value))
@rule
def p_typesuite(p, bindings_list=-1):
"""typesuite : skipnl typestmts"""
if bindings_list and not isinstance(bindings_list[0], list):
# We don't want a docstring to have location, because otherwise
# CPython (debug) crashes with some lineno-related assertion failure.
# That is why a builder is invoked directly, not through build_node.
doc_builder, doc_loc = bindings_list.pop(0)
doc_str = doc_builder()
else:
doc_str = ast.x_Const(None)
bindings = list(itertools.chain.from_iterable(bindings_list))
return doc_str, bindings
@rule # target1: { ... }
def p_typestmt_namespace(p, qualname_wlocs=3, colons=4, body=-1):
"""typestmt : new_bblock nl_off qualname colons nl_on typebody"""
bblock = pop_bblock(p)
emit_stmt(p, *bblock.stmts)
qualname, name_locs = map(tuple, zip(*qualname_wlocs))
bindings = body[1]
for binding in bindings:
binding.qualname[:0] = qualname
binding.name_locs[:0] = name_locs
return bindings
@rule
def p_typestmt(p, qualname_colons=2):
"""typestmt : new_bblock binding"""
bblock = pop_bblock(p)
qualname_wlocs, colons = qualname_colons
is_static = (colons == '::')
func = bblock.fold_into_binding(is_static)
qualname, name_locs = map(list, zip(*qualname_wlocs))
binding_triple = Binding(qualname, name_locs,
func, ast.x_Const(is_static))
return [binding_triple]
@rule # metatype target(): { ... }
def p_binding_typedef(p, metatype_builders=2, qualname=3, mb_call_builder=4,
colons=5, body=-1):
# Here qualname is used instead of pytest to work around
# a reduce/reduce conflict with simple binding (pytest/qualname).
"""binding : nl_off qualname qualname mb_call colons nl_on typebody"""
value = build_typedef(p, body, build_chain(metatype_builders),
qualname, mb_call_builder)
emit_stmt(p, copy_loc(ast.Expr(value), value))
return qualname, colons
@rule # target1: ...
def p_binding_simple(p, qualname=2, colons=3):
"""binding : nl_off qualname colons nl_on stmtexpr"""
return qualname, colons
@rule # ':' or '::', returned verbatim; callers treat '::' as is_static=True
def p_colons(p, colons):
"""colons : COLON
colons : DOUBLECOLON"""
return colons
@rule
def p_test(p, test):
"""test : pytest
test : mystub"""
return test
@rule
def p_pytest(p, stub, builders):
"""pytest : pystub trailers
pytest : mystub trailers_plus"""
return build_chain(builders, stub)
@rule
def p_stub(p, builder):
"""pystub : name
pystub : pyatom
mystub : myatom"""
return build_node(builder)
@rule_wloc
def p_myatom_typedef(p, metatype, body):
"""myatom : pytest typebody"""
return lambda: build_typedef(p, body, metatype)
@rule_wloc
def p_myatom_typedef_named(p, metatype, qualname, mb_call_builder, body):
"""myatom : pytest qualname mb_call typebody"""
return lambda: build_typedef(p, body, metatype, qualname, mb_call_builder)
@rule_wloc
def p_pyatom_num(p, n):
"""pyatom : NUMBER"""
return lambda: ast.Num(n)
@rule
def p_pyatom_string(p, string):
"""pyatom : string"""
return string
@rule_wloc
def p_string(p, s):
"""string : STRING"""
return lambda: ast.Str(s)
@rule_wloc
def p_pyatom_parens_or_tuple(p, testlist=2): # (item, ...)
"""pyatom : LPAREN testlist RPAREN"""
test_l, test_el = testlist
if test_el is not None:
return lambda: test_el
else:
return lambda: ast.Tuple(test_l, ast.Load())
@rule_wloc
def p_pyatom_list(p, testlist=2): # [item, ...]
"""pyatom : LBRACKET testlist RBRACKET"""
test_l = testlist[0]
return lambda: ast.List(test_l, ast.Load())
@rule_wloc
def p_pyatom_dict(p, kv_pairs=2): # [key: value, ...], [:]
"""pyatom : LBRACKET dictents RBRACKET
pyatom : LBRACKET COLON RBRACKET"""
if kv_pairs != ':':
keys, values = map(list, zip(*kv_pairs))
else:
keys, values = [], []
return lambda: ast.Dict(keys, values)
@rule
def p_dictent(p, key, value=3):
"""dictent : test COLON test"""
return key, value
@rule
def p_trailer_call(p, call):
"""trailer : call
mb_call : call
mb_call : empty"""
return call
@rule_wloc
def p_call(p, kw_arg_pairs=2): # x(arg, kw=arg, ...)
"""call : LPAREN arguments RPAREN"""
args = [] # positional arguments
keywords = [] # keyword arguments
seen_kw = set()
for kw_wloc, arg in kw_arg_pairs:
if kw_wloc is None:
if seen_kw:
raise MySyntaxError('non-keyword arg after keyword arg',
node_loc(arg, p))
args.append(arg)
else:
kw, loc = kw_wloc
if kw in seen_kw:
raise MySyntaxError('keyword argument repeated', loc)
else:
seen_kw.add(kw)
keywords.append(set_loc(ast.keyword(kw, arg), loc))
return lambda expr: ast.x_Call(expr, args, keywords)
@rule
def p_argument_pos(p, value):
"""argument : test"""
return None, value
@rule
def p_argument_kw(p, key, value=3):
"""argument : ID EQUALS test"""
kw_wloc = key, ploc(p)
return kw_wloc, value
@rule_wloc
def p_trailer_attr_or_name(p, name=-1): # x.attr or name
"""trailer : PERIOD ID
name : ID"""
return name
@rule_wloc
def p_trailer_item(p, item=2): # x[item]
"""trailer : LBRACKET test RBRACKET"""
return lambda expr: ast.Subscript(expr, ast.Index(item), ast.Load())
# NIY
def p_trailer_multigetter(p): # x.[attr, [item], (call), ...]
"""trailer : PERIOD LBRACKET getters RBRACKET"""
raise NotImplementedError
def p_getter(p):
"""getter : name trailers"""
raise NotImplementedError
def p_trailer_multisetter(p): # x.[attr: value, [item]: value, ...]
"""trailer : PERIOD LBRACKET setters RBRACKET"""
raise NotImplementedError
def p_setter(p):
"""setter : name trailers COLON test"""
raise NotImplementedError
# testlist is a pair of [list of elements] and a single element (if any)
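# Editor's note (illustration): a single element such as '(a)' yields
# ([a_node], a_node), letting p_pyatom_parens_or_tuple return the bare
# expression, whereas '(a, b)' yields ([a_node, b_node], None) and therefore
# builds an ast.Tuple.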
@rule
def p_testlist(p, l):
"""testlist : testlist_plus mb_comma"""
return l
@rule
def p_testlist_empty(p):
"""testlist :"""
return [], None
@rule
def p_testlist_single(p, el):
"""testlist_plus : test"""
return [el], el
@rule
def p_testlist_list(p, l_el, el=-1):
"""testlist_plus : testlist_plus COMMA test"""
l, _ = l_el
l.append(el)
return l, None
# generic (possibly comma-separated, and with trailing comma) list parsing
@rule
def p_list_head(p, el):
"""
qualname : name
typestmts_plus : string
typestmts_plus : typestmt
arguments_plus : argument
dictents_plus : dictent
getters_plus : getter
setters_plus : setter
"""
return [el]
@rule
def p_list_tail(p, l, el=-1):
"""
qualname : qualname PERIOD name
typestmts_plus : typestmts_plus stmtdelim typestmt
arguments_plus : arguments_plus COMMA argument
dictents_plus : dictents_plus COMMA dictent
getters_plus : getters_plus COMMA getter
setters_plus : setters_plus COMMA setter
trailers_plus : trailers trailer
"""
l.append(el)
return l
@rule
def p_list_alias(p, l):
"""
trailers : empty_list
trailers : trailers_plus
typestmts : typestmts_plus mb_stmtdelim
arguments : arguments_plus mb_comma
dictents : dictents_plus mb_comma
getters : getters_plus mb_comma
setters : setters_plus mb_comma
typestmts : empty_list
arguments : empty_list
getters : empty_list
"""
return l
@rule
def p_empty_list(p):
"""empty_list :"""
return []
def p_mb_comma(p):
"""mb_comma :
mb_comma : COMMA"""
# Newline handling: rules that toggle whether NEWLINE tokens are significant.
def p_nl_off(p):
"""nl_off :"""
p.lexer.ignore_newline_stack[-1] += 1
def p_nl_on(p):
"""nl_on :"""
# Work around a 'nl_on' preceding a token pushing to the
# ignore_newline_stack (aka 'ins' below).
# In this case the 'nl_on' gets reduced _after_ handling the token,
# and naive decreasing of the stack top would underflow it.
was_ins_pushing_token = (p.lexer.ignore_newline_stack[-1] == 0)
p.lexer.ignore_newline_stack[-1 - was_ins_pushing_token] -= 1
def p_skipnl(p):
"""skipnl :
skipnl : skipnl NEWLINE"""
def p_stmtdelim(p):
"""stmtdelim : mb_stmtdelim NEWLINE
stmtdelim : mb_stmtdelim SEMI"""
def p_mb_stmtdelim(p):
"""mb_stmtdelim :
mb_stmtdelim : stmtdelim"""
def p_empty(p):
"""empty :"""
def p_error(t):
if t is not None:
raise MySyntaxError("Unexpected {0!r} token".format(t.value),
lex.loc(t))
else:
raise MySyntaxError("Premature end of file")
# That's it!
parser = ply.yacc.yacc(start='exec_start',
errorlog=ply.yacc.NullLogger(), debug=False,
write_tables=False)
# The main entry point.
def my_parse(source, filename='<unknown>', mode='exec', **kwargs):
"""
Parses the given source and returns the result.
Args:
source (str): data to parse
filename (str): file name to report in case of errors
mode (str): type of input to expect:
it can be 'exec' only (constrained by design).
**kwargs are passed directly to the underlying PLY parser
Returns:
ast.Module object
Note:
This function is NOT reentrant.
"""
if mode != 'exec':
raise NotImplementedError("Only 'exec' mode is supported")
pr = parser
lx = lex.lexer.clone()
lx.fileinfo = Fileinfo(source, filename)
pr.bblock = None
try:
ast_root = pr.parse(source, lexer=lx, tracking=True, **kwargs)
return ast.fix_missing_locations(ast_root)
except MySyntaxError as e:
raise SyntaxError(*e.args)
finally:
del pr.bblock
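# Hypothetical usage (editor's note): assuming the source of a Mybuild file,
#   tree = my_parse(source, filename='Mybuild')
# returns an ast.Module which is presumably then compiled and executed with the
# __my_* runtime intrinsics supplied in its globals.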
class MySyntaxError(Exception):
"""Stub class for using instead of standard SyntaxError because the latter
has the special meaning for PLY.
Constructor args are treated in the same way as for SyntaxError."""
|
abusalimov/mybuild
|
mybuild/lang/parse.py
|
Python
|
mit
| 22,275
|
[
"VisIt"
] |
aca42f6bfa3b5a4f0a73cffdda31e566c10ffc8abbb336cb02965d3399f5db53
|
"""
Manage pools of connections so that we can limit the number of requests per site and reuse
connections.
@since: 1.6
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import sys
if sys.version_info[0] > 2:
from urllib import parse as urlparse # Python 3
else:
import urlparse
from collections import defaultdict
import threading
from zeroinstall import logger
from zeroinstall.support import tasks
from zeroinstall.injector import download
default_port = {
'http': 80,
'https': 443,
}
class DownloadStep(object):
url = None
status = None
redirect = None
class DownloadScheduler(object):
"""Assigns (and re-assigns on redirect) Downloads to Sites, allowing per-site limits and connection pooling.
@since: 1.6"""
def __init__(self):
self._sites = defaultdict(lambda: Site()) # (scheme://host:port) -> Site
@tasks.async
def download(self, dl, timeout = None):
"""@type dl: L{zeroinstall.injector.download.Download}"""
# (changed if we get redirected)
current_url = dl.url
redirections_remaining = 10
original_exception = None
# Assign the Download to a Site based on its scheme, host and port. If the result is a redirect,
# reassign it to the appropriate new site. Note that proxy handling happens later; we want to group
# and limit by the target site, not treat everything as going to a single site (the proxy).
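        # Editor's note (illustration): "https://example.org/pkg.tar.bz2" maps to
        # the site key ("https", "example.org", 443), falling back to default_port
        # when the URL has no explicit port, so every download from that host
        # shares one Site object and its per-site limit.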
while True:
location_parts = urlparse.urlparse(current_url)
site_key = (location_parts.scheme,
location_parts.hostname,
location_parts.port or default_port.get(location_parts.scheme, None))
step = DownloadStep()
step.dl = dl
step.url = current_url
blocker = self._sites[site_key].download(step, timeout)
yield blocker
try:
tasks.check(blocker)
except download.DownloadError as ex:
if original_exception is None:
original_exception = ex
else:
logger.warning("%s (while trying mirror)", ex)
mirror_url = step.dl.get_next_mirror_url()
if mirror_url is None:
raise original_exception
# Try the mirror.
# There are actually two places where we try to use the mirror: this one
# looks to see if we have an exact copy of same file somewhere else. If this
# fails, Fetcher will also look for a different archive that would generate
# the required implementation.
logger.warning("%s: trying archive mirror at %s", ex, mirror_url)
step.redirect = mirror_url
redirections_remaining = 10
if not step.redirect:
break
current_url = step.redirect
if redirections_remaining == 0:
raise download.DownloadError("Too many redirections {url} -> {current}".format(
url = dl.url,
current = current_url))
redirections_remaining -= 1
# (else go around the loop again)
MAX_DOWNLOADS_PER_SITE = 5
def _spawn_thread(step):
"""@type step: L{DownloadStep}
@rtype: L{zeroinstall.support.tasks.Blocker}"""
from ._download_child import download_in_thread
thread_blocker = tasks.Blocker("wait for thread " + step.url)
def notify_done(status, ex = None, redirect = None):
step.status = status
step.redirect = redirect
def wake_up_main():
child.join()
thread_blocker.trigger(ex)
return False
tasks.get_loop().call_soon_threadsafe(wake_up_main)
child = threading.Thread(target = lambda: download_in_thread(step.url, step.dl.tempfile, step.dl.modification_time, notify_done))
child.daemon = True
child.start()
return thread_blocker
class Site(object):
"""Represents a service accepting download requests. All requests with the same scheme, host and port are
handled by the same Site object, allowing it to do connection pooling and queuing, although the current
implementation doesn't do either."""
def __init__(self):
self.queue = []
self.active = 0
@tasks.async
def download(self, step, timeout = None):
"""
Queue up this download. If it takes too long, trigger step.dl.timeout (if any), but
only count time spent actually downloading, not time spent queuing.
@type step: L{DownloadStep}"""
if self.active == MAX_DOWNLOADS_PER_SITE:
# Too busy to start a new download now. Queue this one and wait.
ticket = tasks.Blocker('queued download for ' + step.url)
self.queue.append(ticket)
yield ticket, step.dl._aborted
if step.dl._aborted.happened:
raise download.DownloadAborted()
in_progress = [True]
if timeout is not None:
def timeout_cb():
if in_progress:
step.dl.timeout.trigger()
tasks.get_loop().call_later(timeout, timeout_cb)
# Start a new thread for the download
thread_blocker = _spawn_thread(step)
self.active += 1
# Wait for thread to complete download.
yield thread_blocker, step.dl._aborted
del in_progress[0]
self.active -= 1
if self.active < MAX_DOWNLOADS_PER_SITE:
self.process_next() # Start next queued download, if any
if step.dl._aborted.happened:
# Don't wait for child to finish (might be stuck doing IO)
raise download.DownloadAborted()
tasks.check(thread_blocker)
if step.status == download.RESULT_REDIRECT:
assert step.redirect
return # DownloadScheduler will handle it
assert not step.redirect, step.redirect
step.dl._finish(step.status)
def process_next(self):
assert self.active < MAX_DOWNLOADS_PER_SITE
if self.queue:
nxt = self.queue.pop()
nxt.trigger()
|
rammstein/0install
|
zeroinstall/injector/scheduler.py
|
Python
|
lgpl-2.1
| 5,374
|
[
"VisIt"
] |
843fefcae20378440f7a150a3e46ff11905beea19d33d5054bb29d2c36da1dc7
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015-2017 Jonathan Taquet
This file is part of Oe2sSLE (Open e2sSample.all Library Editor).
Oe2sSLE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Oe2sSLE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Oe2sSLE. If not, see <http://www.gnu.org/licenses/>
"""
import tkinter as tk
import webbrowser
from version import Oe2sSLE_VERSION
class AboutDialog(tk.Toplevel):
def __init__(self, parent, *args, **kwargs):
super().__init__(*args, **kwargs)
self.transient(parent)
self.title('About Open e2sSample.all Library Editor')
self.resizable(width=tk.FALSE, height=tk.FALSE)
body = tk.Frame(self)
self.text = tk.Text(body,state=tk.NORMAL,width=80)
self.text.pack()
body.pack(padx=5, pady=5)
text = self.text
text.config(cursor="arrow")
text.insert(tk.INSERT,"Oe2sSLE "+str(Oe2sSLE_VERSION[0])+"."+str(Oe2sSLE_VERSION[1])+"."+str(Oe2sSLE_VERSION[2])+"\n")
text.insert(tk.END,
"""
The Home of this application is its GitHub repository.
To contribute or support, visit """)
text.tag_config("link-github", foreground="blue", underline=1)
text.tag_bind("link-github", "<Button-1>", lambda event: webbrowser.open('https://github.com/JonathanTaquet/Oe2sSLE/'))
text.insert(tk.END, "<https://github.com/JonathanTaquet/Oe2sSLE/>", "link-github")
text.insert(tk.END,
"""
Copyright (C) 2015-2017 Jonathan Taquet
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see """)
text.tag_config("link-gpl", foreground="blue", underline=1)
text.tag_bind("link-gpl", "<Button-1>", lambda event: webbrowser.open('http://www.gnu.org/licenses/'))
text.insert(tk.END, "<http://www.gnu.org/licenses/>", "link-gpl")
text.config(state=tk.DISABLED)
self.focus_set()
self.grab_set()
self.wait_window(self)
|
JonathanTaquet/Oe2sSLE
|
GUI/about_dialog.py
|
Python
|
gpl-2.0
| 2,870
|
[
"VisIt"
] |
75094c52a6d374d8c77e980f439a21ec98af47a4e6957154e97bddce6e36eab0
|
'''
Example of a node that doesn't use Brian at all!
Instead of running any Brian simulation, this node only receives spike trains and
sends them back, using the packet format expected by BrianConnectUDP.
'''
import numpy
import select
import time
from brian_multiprocess_udp import BrianConnectUDP
my_inputclock_dt = 2
def redirect_spikes(spikes_pipe_in, spikes_pipe_out):
"""
    This function takes the place of run_brian_simulation and makes the node behave
    like a router: it simply redirects packets, or receives multiple packets and
    merges them into a single big one.
"""
while True:
if select.select([spikes_pipe_in],[],[]):
t_init = time.time()
spikes_pipe_out.send(numpy.array([index for index,value in enumerate(spikes_pipe_in.recv().tolist()) if value==1]))
time.sleep((my_inputclock_dt/1000.0)-(time.time()-t_init)) # Keeps the same clock defined by the class call.
# This could be useful to keep all the nodes with the
# same density of spikes arriving (if they use similar
# values to the inputclock_dt)
if __name__=="__main__":
my_simulation = BrianConnectUDP(None, NumOfNeuronsInput=100, NumOfNeuronsOutput=100,
input_addresses=[("127.0.0.1", 14141, 40),("127.0.0.1", 16161, 60)],
output_addresses=[("127.0.0.1", 18181)],
inputclock_dt=my_inputclock_dt, TotalSimulationTime=10000, brian_bypass=redirect_spikes, brian_address=2)
|
ricardodeazambuja/BrianConnectUDP
|
examples/Brian_Bypass_Node.py
|
Python
|
cc0-1.0
| 1,658
|
[
"Brian"
] |
e2b5c71c9df23b69b1adf5ad7459c5509003fe0bdc8ac9ed9dbef3be90f9533f
|
#! /usr/bin/env python
# Copyright 2005 Progeny Linux Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Sam Hart
import os
import fnmatch
import commands
import re
import getopt
import sys
import rpmdiff_lib
import progress_bar
def list_files(root, patterns='*', recurse=1, return_folders=0):
"""List all the files in a directory"""
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
class Bunch:
def __init__(self, **kwds): self.__dict__.update(kwds)
arg = Bunch(recurse=recurse, pattern_list=pattern_list, return_folders=return_folders, results=[])
def visit(arg, dirname, files):
# Append to arg.results all relevant files
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if arg.return_folders or os.path.isfile(fullname):
for pattern in arg.pattern_list:
if fnmatch.fnmatch(name, pattern):
arg.results.append(fullname)
break
# Block recursion if disallowed
if not arg.recurse: files[:]=[]
os.path.walk(root, visit, arg)
return arg.results
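# Hypothetical usage (editor's note):
#   list_files('/srv/rpms', '*.rpm;*.src.rpm', recurse=1)
# returns every .rpm and .src.rpm path below /srv/rpms; patterns are split on
# ';' and matched with fnmatch against each file name.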
def grab_rpm_files(directory, recurse=0, binpkg=1, srcpkg=0):
"""Given a directory, pull all the rpms from it"""
rpm_files = []
if srcpkg:
for filename in list_files(directory, '*.srpm;*.src.rpm', recurse):
rpm_files.append(filename)
if binpkg:
for filename in list_files(directory, '*.rpm', recurse):
rpm_files.append(filename)
return rpm_files
def process(rpm_to_dir, rpm_from_dir, mirror, arch, verbose, recurse, progress):
if verbose: print ">process(..)"
error_log = []
col = commands.getoutput("echo \"$COLUMNS\"")
try:
columns = int(col)
except:
columns = 60
pb = progress_bar.pb("Progress: ", "-", columns, sys.stderr)
rpm_from_files = grab_rpm_files(rpm_from_dir, recurse)
rpm_to_files = grab_rpm_files(rpm_to_dir, recurse)
rpmdiff_inst = rpmdiff_lib.rpmdiff(verbose, progress, pb)
rpm_from_dict = rpmdiff_inst.generate_rpm_dict(rpm_from_files)
rpm_to_dict = rpmdiff_inst.generate_rpm_dict(rpm_to_files)
i = 0.0
for rpm in rpm_from_dict.keys():
i = i + 1.0
if progress:
percentage = i / len(rpm_from_dict.keys())
pb.progress(percentage)
# check if the new arch component exists
if not rpm_to_dict.has_key(rpm):
# First, attempt to grab from mirror
new_arch_file = rpm + "-" + rpm_from_dict[rpm]['version'] + "-" + rpm_from_dict[rpm]['release'] + "." + arch + ".rpm"
path = rpm_to_dir + "/" + new_arch_file
full_url = mirror + "/" + new_arch_file
cmd = ("wget -O \"%s\" %s") % (path, full_url)
if verbose: print ">> " + cmd
output = commands.getoutput(cmd)
if not os.path.exists(path):
# Okay, download didn't work, let's just copy the file over and log it
cmd = ("cp %s %s/.") % (rpm_from_dict[rpm]['filename'], rpm_to_dir)
if verbose: print">> File could not download, default to copy original"
output = commands.getoutput(cmd)
error_log.append("Problem with %s for %s arch. Could not find in mirror." % (rpm, arch))
if len(error_log):
print >> sys.stderr, "ERRORS\n"
i = 0
for line in error_log:
i += 1
print "[%d] %s" % (i, line)
def usage():
print "get-arch.py -"
print " Given a pile of RPMs for a given arch, will verify that"
print "associated RPMs for another arch are present. If not, will"
print "attempt to download them."
print "\nUSAGE:"
print " get-arch.py [options] <ARCH2> <RPM_ARCH1_DIR> <RPM_ARCH2_DIR> <MIRROR>"
print "\nWhere [options] may be one of the following:"
print "\t-r | --recursive\tRecursively import"
print "\t-v | --verbose\tBe verbose in processing"
print "\t-p | --progress\tShow progress bar (invalid without -l)"
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "rvp", ["recursive", "verbose", "progress"])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
verbose = 0
recurse = 0
progress = 0
for o, a in opts:
if o in ("-v", "--verbose"):
verbose = 1
if o in ("-r", "--recursive"):
recurse = 1
if o in ("-p", "--progress"):
progress = 1
try:
mirror = sys.argv[-1]
rpm_to_dir = sys.argv[-2]
rpm_from_dir = sys.argv[-3]
arch = sys.argv[-4]
except:
usage()
sys.exit(2)
process(rpm_to_dir, rpm_from_dir, mirror, arch, verbose, recurse, progress)
if __name__ == "__main__":
main()
# vim:set ai et sts=4 sw=4 tw=80:
|
blipvert/rpmstrap
|
tools/rpm_get-arch.py
|
Python
|
gpl-2.0
| 5,634
|
[
"VisIt"
] |
15696a9724b280d4a0b07bcdee8b7f7fb9d6bb5191f81da05a13ff8964727d58
|
"""Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
from collections import Hashable
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
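# Kernels exercised by every test below: RBF and Matern with isotropic,
# anisotropic and fixed length scales, plus rational quadratic, periodic
# (ExpSineSquared), dot-product and pairwise kernels.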
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
""" Compare analytic and numeric gradient of kernels. """
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
""" Check that parameter vector theta of kernel is set correctly. """
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s.rstrip("_bounds"),
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i+1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i+1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
""" Auto-correlation and cross-correlation should be consistent. """
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
""" Test that diag method of kernel returns consistent results. """
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
""" Adding kernels and multiplying kernels should be commutative. """
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
""" Anisotropic kernel should be consistent with isotropic kernels."""
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
""" Test stationarity of kernels."""
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def test_kernel_clone():
""" Test that sklearn's clone works correctly on kernels. """
for kernel in kernels:
kernel_cloned = clone(kernel)
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
for attr in kernel.__dict__.keys():
attr_value = getattr(kernel, attr)
attr_value_cloned = getattr(kernel_cloned, attr)
if attr.startswith("hyperparameter_"):
assert_equal(attr_value.name, attr_value_cloned.name)
assert_equal(attr_value.value_type,
attr_value_cloned.value_type)
assert_array_equal(attr_value.bounds,
attr_value_cloned.bounds)
assert_equal(attr_value.n_elements,
attr_value_cloned.n_elements)
elif np.iterable(attr_value):
for i in range(len(attr_value)):
if np.iterable(attr_value[i]):
assert_array_equal(attr_value[i],
attr_value_cloned[i])
else:
assert_equal(attr_value[i], attr_value_cloned[i])
else:
assert_equal(attr_value, attr_value_cloned)
if not isinstance(attr_value, Hashable):
# modifiable attributes must not be identical
assert_not_equal(id(attr_value), id(attr_value_cloned))
def test_matern_kernel():
""" Test consistency of Matern kernel for special values of nu. """
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
"""Check that GP kernels can also be used as pairwise kernels."""
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
"""Check that set_params()/get_params() is consistent with kernel.theta."""
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds is "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds is "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value]*size})
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
[value]*size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
|
toastedcornflakes/scikit-learn
|
sklearn/gaussian_process/tests/test_kernels.py
|
Python
|
bsd-3-clause
| 11,602
|
[
"Gaussian"
] |
37079c768ba784770c36fc1562cc4afec9e9ff744d2f964fac873393165dfcf9
|
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import pickle
import gzip
import os, sys, errno
import time
import math
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
import theano
from utils.providers import ListDataProvider
from frontend.label_normalisation import HTSLabelNormalisation, XMLLabelNormalisation
from frontend.silence_remover import SilenceRemover
from frontend.silence_remover import trim_silence
from frontend.min_max_norm import MinMaxNormalisation
#from frontend.acoustic_normalisation import CMPNormalisation
from frontend.acoustic_composition import AcousticComposition
from frontend.parameter_generation import ParameterGeneration
#from frontend.feature_normalisation_base import FeatureNormBase
from frontend.mean_variance_norm import MeanVarianceNorm
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
import configuration
from models.dnn import DNN
from models.ms_dnn import MultiStreamDNN
from models.ms_dnn_gv import MultiStreamDNNGv
from models.sdae import StackedDenoiseAutoEncoder
from utils.compute_distortion import DistortionComputation, IndividualDistortionComp
from utils.generate import generate_wav
from utils.learn_rates import ExpDecreaseLearningRate
#import matplotlib.pyplot as plt
# our custom logging class that can also plot
#from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot
import logging # as logging
import logging.config
import io
def extract_file_id_list(file_list):
file_id_list = []
for file_name in file_list:
file_id = os.path.basename(os.path.splitext(file_name)[0])
file_id_list.append(file_id)
return file_id_list
def read_file_list(file_name):
logger = logging.getLogger("read_file_list")
file_lists = []
fid = open(file_name)
for line in fid.readlines():
line = line.strip()
if len(line) < 1:
continue
file_lists.append(line)
fid.close()
logger.debug('Read file list from %s' % file_name)
return file_lists
def make_output_file_list(out_dir, in_file_lists):
out_file_lists = []
for in_file_name in in_file_lists:
file_id = os.path.basename(in_file_name)
out_file_name = out_dir + '/' + file_id
out_file_lists.append(out_file_name)
return out_file_lists
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
if not os.path.exists(file_dir) and new_dir_switch:
os.makedirs(file_dir)
file_name_list = []
for file_id in file_id_list:
file_name = file_dir + '/' + file_id + file_extension
file_name_list.append(file_name)
return file_name_list
def visualize_dnn(dnn):
layer_num = len(dnn.params) / 2 ## including input and output
for i in range(layer_num):
fig_name = 'Activation weights W' + str(i)
fig_title = 'Activation weights of W' + str(i)
xlabel = 'Neuron index of hidden layer ' + str(i)
ylabel = 'Neuron index of hidden layer ' + str(i+1)
if i == 0:
xlabel = 'Input feature index'
if i == layer_num-1:
ylabel = 'Output feature index'
logger.create_plot(fig_name, SingleWeightMatrixPlot)
plotlogger.add_plot_point(fig_name, fig_name, dnn.params[i*2].get_value(borrow=True).T)
plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
def train_DNN(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False):
# get loggers for this function
# this one writes to both console and file
logger = logging.getLogger("main.train_DNN")
logger.debug('Starting train_DNN')
if plot:
# this one takes care of plotting duties
plotlogger = logging.getLogger("plotting")
# create an (empty) plot of training convergence, ready to receive data points
logger.create_plot('training convergence',MultipleSeriesPlot)
try:
assert numpy.sum(ms_outs) == n_outs
except AssertionError:
logger.critical('the summation of multi-stream outputs does not equal to %d' %(n_outs))
raise
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layers_sizes = hyper_params['hidden_layers_sizes']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
## use a switch to turn on pretraining
    ## pretraining may not help much; if that is the case, we turn it off to save time
do_pretraining = hyper_params['do_pretraining']
pretraining_epochs = int(hyper_params['pretraining_epochs'])
pretraining_lr = float(hyper_params['pretraining_lr'])
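    # round the buffer size down to a whole number of minibatches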
buffer_size = int(buffer_size / batch_size) * batch_size
###################
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProvider(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProvider(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False)
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition()
train_set_x, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_y = valid_data_reader.load_next_partition()
valid_set_x, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
    ## temporarily we use the training set as pretrain_set_x.
##we need to support any data for pretraining
pretrain_set_x = train_set_x
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
dnn_model = None
pretrain_fn = None ## not all the model support pretraining right now
train_fn = None
valid_fn = None
    valid_model = None ## valid_fn and valid_model are the same; reserved to compute multi-stream distortion
if model_type == 'DNN':
dnn_model = DNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'SDAE':
##basic model is ready.
        ## if the corruption level is set to zero, it becomes a normal autoencoder
dnn_model = StackedDenoiseAutoEncoder(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes)
if do_pretraining:
pretraining_fn = dnn_model.pretraining_functions(pretrain_set_x, batch_size)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
elif model_type == 'MSDNN': ##model is ready, but the hyper-parameters are not optimised.
dnn_model = MultiStreamDNN(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
elif model_type == 'MSDNN_GV': ## not fully ready
dnn_model = MultiStreamDNNGv(numpy_rng=numpy_rng, n_ins=n_ins, ms_outs=ms_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
stream_weights = stream_weights,
hidden_activation = hidden_activation,
output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y),
batch_size=batch_size, lr_weights = stream_lr_weights)
else:
logger.critical('%s type NN model is not supported!' %(model_type))
raise
## if pretraining is supported in one model, add the switch here
## be careful to use autoencoder for pretraining here:
## for SDAE, currently only sigmoid function is supported in the hidden layers, as our input is scaled to [0, 1]
## however, tanh works better and converge fast in finetuning
##
## Will extend this soon...
if do_pretraining and model_type == 'SDAE':
logger.info('pretraining the %s model' %(model_type))
corruption_level = 0.0
## in SDAE we do layer-wise pretraining using autoencoders
for i in range(dnn_model.n_layers):
for epoch in range(pretraining_epochs):
sub_start_time = time.clock()
pretrain_loss = []
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition()
pretrain_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
n_train_batches = pretrain_set_x.get_value().shape[0] / batch_size
for batch_index in range(n_train_batches):
pretrain_loss.append(pretraining_fn[i](index=batch_index,
corruption=corruption_level,
learning_rate=pretraining_lr))
sub_end_time = time.clock()
logger.info('Pre-training layer %i, epoch %d, cost %s, time spent%.2f' % (i+1, epoch+1, numpy.mean(pretrain_loss), (sub_end_time - sub_start_time)))
train_data_reader.reset()
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.clock()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
while (epoch < training_epochs):
epoch = epoch + 1
current_momentum = momentum
current_finetune_lr = finetune_lr
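        # keep the warm-up learning rate and momentum for the first warmup_epoch
        # epochs, then halve the learning rate after every subsequent epoch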
if epoch <= warmup_epoch:
current_finetune_lr = finetune_lr
current_momentum = warmup_momentum
else:
current_finetune_lr = previous_finetune_lr * 0.5
previous_finetune_lr = current_finetune_lr
train_error = []
sub_start_time = time.clock()
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition()
train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)
n_train_batches = train_set_x.get_value().shape[0] / batch_size
logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) )
for minibatch_index in range(n_train_batches):
this_train_error = train_fn(minibatch_index, current_finetune_lr, current_momentum)
train_error.append(this_train_error)
if numpy.isnan(this_train_error):
logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) )
train_data_reader.reset()
logger.debug('calculating validation loss')
validation_losses = valid_fn()
this_validation_loss = numpy.mean(validation_losses)
# this has a possible bias if the minibatches were not all of identical size
        # but it should not be significant if minibatches are small
this_train_valid_loss = numpy.mean(train_error)
sub_end_time = time.clock()
loss_difference = this_validation_loss - previous_loss
logger.info('epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if plot:
plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error')
if this_validation_loss < best_validation_loss:
best_dnn_model = dnn_model
best_validation_loss = this_validation_loss
logger.debug('validation loss decreased, so saving model')
early_stop = 0
else:
logger.debug('validation loss did not improve')
dbn = best_dnn_model
early_stop += 1
if early_stop >= early_stop_epoch:
# too many consecutive epochs without surpassing the best model
logger.debug('stopping early')
break
if math.isnan(this_validation_loss):
break
previous_loss = this_validation_loss
end_time = time.clock()
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
if plot:
plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
return best_validation_loss
def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
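    # Load the trained (pickled) DNN and, for every normalised label file in
    # valid_file_list, predict acoustic parameters and write them as raw
    # float32 binaries to the corresponding paths in out_file_list.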
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
# visualize_dnn(dbn)
file_number = len(valid_file_list)
for i in range(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size / n_ins))]
features = features.reshape((-1, n_ins))
temp_set_x = features.tolist()
test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))
predicted_parameter = dnn_model.parameter_prediction(test_set_x=test_set_x)
# predicted_parameter = test_out()
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
## generate bottleneck layer activations as features
def dnn_hidden_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, bottleneck_index):
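    # Same as dnn_generation, but writes the activations of the hidden layer
    # selected by bottleneck_index instead of the network's final output.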
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
file_number = len(valid_file_list)
for i in range(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size / n_ins))]
features = features.reshape((-1, n_ins))
temp_set_x = features.tolist()
test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))
predicted_parameter = dnn_model.generate_top_hidden_layer(test_set_x=test_set_x, bn_layer_index=bottleneck_index)
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
def main_function(cfg):
# get a logger for this main function
logger = logging.getLogger("main")
# get another logger to handle plotting duties
plotlogger = logging.getLogger("plotting")
# later, we might do this via a handler that is created, attached and configured
# using the standard config mechanism of the logging module
# but for now we need to do it manually
plotlogger.set_plot_path(cfg.plot_dir)
#### parameter setting########
hidden_layers_sizes = cfg.hyper_params['hidden_layers_sizes']
####prepare environment
try:
file_id_list = read_file_list(cfg.file_id_scp)
logger.debug('Loaded file id list from %s' % cfg.file_id_scp)
except IOError:
# this means that open(...) threw an error
logger.critical('Could not load file id list from %s' % cfg.file_id_scp)
raise
###total file number including training, development, and testing
total_file_number = len(file_id_list)
data_dir = cfg.data_dir
nn_cmp_dir = os.path.join(data_dir, 'nn' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
nn_cmp_norm_dir = os.path.join(data_dir, 'nn_norm' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
model_dir = os.path.join(cfg.work_dir, 'nnets_model')
gen_dir = os.path.join(cfg.work_dir, 'gen')
in_file_list_dict = {}
for feature_name in list(cfg.in_dir_dict.keys()):
in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False)
nn_cmp_file_list = prepare_file_path_list(file_id_list, nn_cmp_dir, cfg.cmp_ext)
nn_cmp_norm_file_list = prepare_file_path_list(file_id_list, nn_cmp_norm_dir, cfg.cmp_ext)
###normalisation information
norm_info_file = os.path.join(data_dir, 'norm_info' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim) + '_' + cfg.output_feature_normalisation + '.dat')
### normalise input full context label
    # currently supporting two different forms of linguistic features
# later, we should generalise this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name)
lab_dim = label_normaliser.dimension
logger.info('Input label dimension is %d' % lab_dim)
suffix=str(lab_dim)
# no longer supported - use new "composed" style labels instead
elif cfg.label_style == 'composed':
# label_normaliser = XMLLabelNormalisation(xpath_file_name=cfg.xpath_file_name)
suffix='composed'
if cfg.process_labels_in_work_dir:
label_data_dir = cfg.work_dir
else:
label_data_dir = data_dir
# the number can be removed
binary_label_dir = os.path.join(label_data_dir, 'binary_label_'+suffix)
nn_label_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_'+suffix)
nn_label_norm_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_norm_'+suffix)
# nn_label_norm_mvn_dir = os.path.join(data_dir, 'nn_no_silence_lab_norm_'+suffix)
in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
nn_label_file_list = prepare_file_path_list(file_id_list, nn_label_dir, cfg.lab_ext)
nn_label_norm_file_list = prepare_file_path_list(file_id_list, nn_label_norm_dir, cfg.lab_ext)
# to do - sanity check the label dimension here?
min_max_normaliser = None
label_norm_file = 'label_norm_%s.dat' %(cfg.label_style)
label_norm_file = os.path.join(label_data_dir, label_norm_file)
if cfg.NORMLAB and (cfg.label_style == 'HTS'):
# simple HTS labels
logger.info('preparing label data (input) using standard HTS style labels')
# label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list)
# remover = SilenceRemover(n_cmp = lab_dim, silence_pattern = ['*-#+*'])
# remover.remove_silence(binary_label_file_list, in_label_align_file_list, nn_label_file_list)
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
if cfg.NORMLAB and (cfg.label_style == 'composed'):
# new flexible label preprocessor
logger.info('preparing label data (input) using "composed" style labels')
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
logger.info('Loaded label configuration')
# logger.info('%s' % label_composer.configuration.labels )
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension will be %d' % lab_dim)
if cfg.precompile_xpaths:
label_composer.precompile_xpaths()
        # there is now a set of parallel input label files (e.g., one set of HTS and another set of Ossian trees)
# create all the lists of these, ready to pass to the label composer
in_label_align_file_list = {}
for label_style, label_style_required in label_composer.label_styles.items():
if label_style_required:
logger.info('labels of style %s are required - constructing file paths for them' % label_style)
if label_style == 'xpath':
in_label_align_file_list['xpath'] = prepare_file_path_list(file_id_list, cfg.xpath_label_align_dir, cfg.utt_ext, False)
elif label_style == 'hts':
in_label_align_file_list['hts'] = prepare_file_path_list(file_id_list, cfg.hts_label_align_dir, cfg.lab_ext, False)
else:
logger.critical('unsupported label style %s specified in label configuration' % label_style)
raise Exception
# now iterate through the files, one at a time, constructing the labels for them
num_files=len(file_id_list)
logger.info('the label styles required are %s' % label_composer.label_styles)
for i in range(num_files):
logger.info('making input label features for %4d of %4d' % (i+1,num_files))
# iterate through the required label styles and open each corresponding label file
# a dictionary of file descriptors, pointing at the required files
required_labels={}
for label_style, label_style_required in label_composer.label_styles.items():
# the files will be a parallel set of files for a single utterance
# e.g., the XML tree and an HTS label file
if label_style_required:
required_labels[label_style] = open(in_label_align_file_list[label_style][i] , 'r')
logger.debug(' opening label file %s' % in_label_align_file_list[label_style][i])
logger.debug('label styles with open files: %s' % required_labels)
label_composer.make_labels(required_labels,out_file_name=binary_label_file_list[i],fill_missing_values=cfg.fill_missing_values,iterate_over_frames=cfg.iterate_over_frames)
# now close all opened files
for fd in required_labels.values():
fd.close()
# silence removal
if cfg.remove_silence_using_binary_labels:
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from label using silence feature: %s'%(label_composer.configuration.labels[silence_feature]))
logger.info('Silence will be removed from CMP files in same way')
## Binary labels have 2 roles: both the thing trimmed and the instructions for trimming:
trim_silence(binary_label_file_list, nn_label_file_list, lab_dim, \
binary_label_file_list, lab_dim, silence_feature)
else:
logger.info('No silence removal done')
# start from the labels we have just produced, not trimmed versions
nn_label_file_list = binary_label_file_list
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
if min_max_normaliser != None:
### save label normalisation information for unseen testing labels
label_min_vector = min_max_normaliser.min_vector
label_max_vector = min_max_normaliser.max_vector
label_norm_info = numpy.concatenate((label_min_vector, label_max_vector), axis=0)
label_norm_info = numpy.array(label_norm_info, 'float32')
fid = open(label_norm_file, 'wb')
label_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(label_min_vector.size, label_norm_file))
### make output acoustic data
if cfg.MAKECMP:
logger.info('creating acoustic (output) features')
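        # windows used to append delta and delta-delta (acceleration) features
        # to the static acoustic parameters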
delta_win = [-0.5, 0.0, 0.5]
acc_win = [1.0, -2.0, 1.0]
acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win)
acoustic_worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, cfg.in_dimension_dict, cfg.out_dimension_dict)
if cfg.remove_silence_using_binary_labels:
## do this to get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from CMP using binary label file')
## overwrite the untrimmed audio with the trimmed version:
trim_silence(nn_cmp_file_list, nn_cmp_file_list, cfg.cmp_dim, \
binary_label_file_list, lab_dim, silence_feature)
else: ## back off to previous method using HTS labels:
remover = SilenceRemover(n_cmp = cfg.cmp_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(nn_cmp_file_list, in_label_align_file_list, nn_cmp_file_list) # save to itself
### save acoustic normalisation information for normalising the features back
var_dir = os.path.join(data_dir, 'var')
if not os.path.exists(var_dir):
os.makedirs(var_dir)
var_file_dict = {}
for feature_name in list(cfg.out_dimension_dict.keys()):
var_file_dict[feature_name] = os.path.join(var_dir, feature_name + '_' + str(cfg.out_dimension_dict[feature_name]))
### normalise output acoustic data
if cfg.NORMCMP:
logger.info('normalising acoustic (output) features using method %s' % cfg.output_feature_normalisation)
cmp_norm_info = None
if cfg.output_feature_normalisation == 'MVN':
normaliser = MeanVarianceNorm(feature_dimension=cfg.cmp_dim)
###calculate mean and std vectors on the training data, and apply on the whole dataset
global_mean_vector = normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number], 0, cfg.cmp_dim)
global_std_vector = normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector, 0, cfg.cmp_dim)
normaliser.feature_normalisation(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_norm_info = numpy.concatenate((global_mean_vector, global_std_vector), axis=0)
elif cfg.output_feature_normalisation == 'MINMAX':
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim)
global_mean_vector = min_max_normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number])
global_std_vector = min_max_normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector)
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim, min_value = 0.01, max_value = 0.99)
min_max_normaliser.find_min_max_values(nn_cmp_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_min_vector = min_max_normaliser.min_vector
cmp_max_vector = min_max_normaliser.max_vector
cmp_norm_info = numpy.concatenate((cmp_min_vector, cmp_max_vector), axis=0)
else:
logger.critical('Normalisation type %s is not supported!\n' %(cfg.output_feature_normalisation))
raise
cmp_norm_info = numpy.array(cmp_norm_info, 'float32')
fid = open(norm_info_file, 'wb')
cmp_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(cfg.output_feature_normalisation, norm_info_file))
# logger.debug(' value was\n%s' % cmp_norm_info)
feature_index = 0
for feature_name in list(cfg.out_dimension_dict.keys()):
feature_std_vector = numpy.array(global_std_vector[:,feature_index:feature_index+cfg.out_dimension_dict[feature_name]], 'float32')
fid = open(var_file_dict[feature_name], 'w')
feature_std_vector.tofile(fid)
fid.close()
logger.info('saved %s variance vector to %s' %(feature_name, var_file_dict[feature_name]))
# logger.debug(' value was\n%s' % feature_std_vector)
feature_index += cfg.out_dimension_dict[feature_name]
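    # split the (normalised) label and acoustic file lists sequentially into
    # training, validation and test subsets, in that order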
train_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number]
train_y_file_list = nn_cmp_norm_file_list[0:cfg.train_file_number]
valid_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
valid_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
# we need to know the label dimension before training the DNN
# computing that requires us to look at the labels
#
# currently, there are two ways to do this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name)
lab_dim = label_normaliser.dimension
elif cfg.label_style == 'composed':
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension is %d' % lab_dim)
combined_model_arch = str(len(hidden_layers_sizes))
for hid_size in hidden_layers_sizes:
combined_model_arch += '_' + str(hid_size)
# nnets_file_name = '%s/%s_%s_%d.%d.%d.%d.%d.train.%d.model' \
# %(model_dir, cfg.model_type, cfg.combined_feature_name, int(cfg.multistream_switch),
# len(hidden_layers_sizes), hidden_layers_sizes[0],
# lab_dim, cfg.cmp_dim, cfg.train_file_number)
nnets_file_name = '%s/%s_%s_%d_%s_%d.%d.train.%d.model' \
%(model_dir, cfg.model_type, cfg.combined_feature_name, int(cfg.multistream_switch),
combined_model_arch, lab_dim, cfg.cmp_dim, cfg.train_file_number)
### DNN model training
if cfg.TRAINDNN:
logger.info('training DNN')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create model directory %s' % model_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
try:
# print 'start DNN'
train_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot)
except KeyboardInterrupt:
logger.critical('train_DNN interrupted via keyboard')
# Could 'raise' the exception further, but that causes a deep traceback to be printed
# which we don't care about for a keyboard interrupt. So, just bail out immediately
sys.exit(1)
except:
logger.critical('train_DNN threw an exception')
raise
### generate parameters from DNN
if cfg.GENBNFEA:
temp_dir_name = '%s_%s_%d_%d_%d_%d_%d_%s_hidden' \
%(cfg.model_type, cfg.combined_feature_name, int(cfg.do_post_filtering), \
cfg.train_file_number, lab_dim, cfg.cmp_dim, \
len(hidden_layers_sizes), combined_model_arch)
gen_dir = os.path.join(gen_dir, temp_dir_name)
bottleneck_size = min(hidden_layers_sizes)
bottleneck_index = 0
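        # treat the smallest hidden layer as the bottleneck; if several layers
        # share that size, the last matching layer index is used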
for i in range(len(hidden_layers_sizes)):
if hidden_layers_sizes[i] == bottleneck_size:
bottleneck_index = i
logger.info('generating from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
gen_file_id_list = file_id_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)
dnn_hidden_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list, bottleneck_index)
gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
temp_dir_name = '%s_%s_%d_%d_%d_%d_%d_%d_%d' \
%(cfg.model_type, cfg.combined_feature_name, int(cfg.do_post_filtering), \
cfg.train_file_number, lab_dim, cfg.cmp_dim, \
len(hidden_layers_sizes), max(hidden_layers_sizes), min(hidden_layers_sizes))
gen_dir = os.path.join(gen_dir, temp_dir_name)
if cfg.DNNGEN:
logger.info('generating from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)
# dnn_generation(valid_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list)
dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list)
logger.debug('denormalising generated output using method %s' % cfg.output_feature_normalisation)
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
cmp_min_vector = cmp_min_max[0, ]
cmp_max_vector = cmp_min_max[1, ]
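        # the two stacked vectors are mean/std for MVN normalisation or
        # min/max for MINMAX normalisation, matching how norm_info_file was written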
if cfg.output_feature_normalisation == 'MVN':
denormaliser = MeanVarianceNorm(feature_dimension = cfg.cmp_dim)
denormaliser.feature_denormalisation(gen_file_list, gen_file_list, cmp_min_vector, cmp_max_vector)
elif cfg.output_feature_normalisation == 'MINMAX':
denormaliser = MinMaxNormalisation(cfg.cmp_dim, min_value = 0.01, max_value = 0.99, min_vector = cmp_min_vector, max_vector = cmp_max_vector)
denormaliser.denormalise_data(gen_file_list, gen_file_list)
else:
logger.critical('denormalising method %s is not supported!\n' %(cfg.output_feature_normalisation))
raise
##perform MLPG to smooth parameter trajectory
        ## if lf0 is included, the output features must have vuv.
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features)
generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict)
### generate wav
if cfg.GENWAV:
logger.info('reconstructing waveform(s)')
generate_wav(gen_dir, gen_file_id_list, cfg) # generated speech
# generate_wav(nn_cmp_dir, gen_file_id_list) # reference copy synthesis speech
### evaluation: calculate distortion
if cfg.CALMCD:
logger.info('calculating MCD')
ref_data_dir = os.path.join(data_dir, 'ref_data')
ref_mgc_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mgc_ext)
ref_bap_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.bap_ext)
ref_lf0_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lf0_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
calculator = IndividualDistortionComp()
spectral_distortion = 0.0
bap_mse = 0.0
f0_mse = 0.0
vuv_error = 0.0
valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.remove_silence_using_binary_labels:
## get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
## use first feature in label -- hardcoded for now
silence_feature = 0
## Use these to trim silence:
untrimmed_test_labels = binary_label_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if 'mgc' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_mgc_list)
valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
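            # convert the raw cepstral distance into mel-cepstral distortion in dB
            # using the standard (10 / ln 10) * sqrt(2) factor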
valid_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
test_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
if 'bap' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_bap_list, cfg.bap_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_bap_list)
valid_bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
test_bap_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
valid_bap_mse = valid_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC
test_bap_mse = test_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC
if 'lf0' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_lf0_list, cfg.lf0_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lf0_list)
valid_f0_mse, valid_vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
test_f0_mse , test_vuv_error = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
logger.info('Develop: DNN -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' \
%(valid_spectral_distortion, valid_bap_mse, valid_f0_mse, valid_vuv_error*100.))
logger.info('Test : DNN -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' \
%(test_spectral_distortion , test_bap_mse , test_f0_mse , test_vuv_error*100.))
# this can be removed
#
if 0: #to calculate distortion of HMM baseline
hmm_gen_no_silence_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nick_hmm_pf_2400_no_silence'
hmm_gen_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nick_hmm_pf_2400'
if 1:
hmm_mgc_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.mgc_ext)
hmm_bap_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.bap_ext)
hmm_lf0_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.lf0_ext)
hmm_mgc_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.mgc_ext)
hmm_bap_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.bap_ext)
hmm_lf0_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.lf0_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(hmm_mgc_list, in_gen_label_align_file_list, hmm_mgc_no_silence_list)
remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(hmm_bap_list, in_gen_label_align_file_list, hmm_bap_no_silence_list)
remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(hmm_lf0_list, in_gen_label_align_file_list, hmm_lf0_no_silence_list)
calculator = IndividualDistortionComp()
spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.mgc_ext, cfg.mgc_dim)
bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.bap_ext, cfg.bap_dim)
f0_mse, vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.lf0_ext, cfg.lf0_dim)
spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0)
bap_mse = bap_mse / 10.0
logger.info('Develop: HMM -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' %(spectral_distortion, bap_mse, f0_mse, vuv_error*100.))
spectral_distortion = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.mgc_ext, cfg.mgc_dim)
bap_mse = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.bap_ext, cfg.bap_dim)
f0_mse, vuv_error = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.lf0_ext, cfg.lf0_dim)
spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0)
bap_mse = bap_mse / 10.0
logger.info('Test : HMM -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' %(spectral_distortion, bap_mse, f0_mse, vuv_error*100.))
if __name__ == '__main__':
# these things should be done even before trying to parse the command line
# create a configuration instance
# and get a short name for this instance
cfg=configuration.cfg
# set up logging to use our custom class
logging.setLoggerClass(LoggerPlotter)
# get a logger for this main function
logger = logging.getLogger("main")
if len(sys.argv) != 2:
logger.critical('usage: run_dnn.sh [config file name]')
sys.exit(1)
config_file = sys.argv[1]
config_file = os.path.abspath(config_file)
cfg.configure(config_file)
if cfg.profile:
logger.info('profiling is activated')
import cProfile, pstats
cProfile.run('main_function(cfg)', 'mainstats')
# create a stream for the profiler to write to
profiling_output = io.StringIO()
p = pstats.Stats('mainstats', stream=profiling_output)
# print stats to that stream
# here we just report the top 10 functions, sorted by total amount of time spent in each
p.strip_dirs().sort_stats('tottime').print_stats(10)
# print the result to the log
logger.info('---Profiling result follows---\n%s' % profiling_output.getvalue() )
profiling_output.close()
logger.info('---End of profiling result---')
else:
main_function(cfg)
sys.exit(0)
|
bajibabu/merlin
|
src/work_in_progress/run_dnn_bottleneck.py
|
Python
|
apache-2.0
| 52,400
|
[
"NEURON"
] |
d47b4b21dad0fa56135e90a639265bd5c879d3bede50d28496c36dc932c83b1e
|