text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
import numpy as np
def MM(i,j,e,A,B,C,a,b):
    ''' Recursive definition of 1D multipole-moment integrals over two
    Gaussians via an Obara-Saika-type recursion.
    Returns a float.
    a: orbital exponent on Gaussian 'a' (e.g. alpha in the text)
    b: orbital exponent on Gaussian 'b' (e.g. beta in the text)
    i,j: orbital angular momentum number on Gaussian 'a' and 'b'
    e: power of the multipole operator (x - C)**e
       (e.g. always zero for overlap integrals)
    A,B: 1D origins of Gaussian 'a' and 'b'
    C: 1D origin of the multipole operator
    '''
    p = a + b                # composite exponent
    q = (a*b)/p              # reduced exponent
    P = (1./p)*(a*A + b*B)   # Gaussian product center
    PA = P-A
    PB = P-B
    PC = P-C
    AB = A-B
    if i == j == e == 0:
        # base case: Gaussian product prefactor K_AB
        return np.exp(-q*AB*AB)
    elif i < 0 or j < 0 or e < 0:
        # out-of-range index terminates the recursion
        return 0.
    elif i == 0 and j == 0:
        # decrement operator power e (reached only for e > 0; the i- and
        # j-transfer terms vanish identically here since i == j == 0)
        return PC*MM(i,j,e-1,A,B,C,a,b) +\
               1./(2*p)*(e-1)*MM(i,j,e-2,A,B,C,a,b)
    elif i == 0:
        # decrement index j.  BUG FIX: the original guard was
        # `i == 0 and j>0 and e>0`, so (i=0, j>0, e=0) fell through to the
        # i-decrement branch below, where every term carries an i-1 = -1
        # index and the result was incorrectly 0 (e.g. an s|p_y overlap
        # component).  The j-transfer relation is valid for any e >= 0.
        return PB*MM(i,j-1,e,A,B,C,a,b) +\
               1./(2*p)*i*MM(i-1,j-1,e,A,B,C,a,b) +\
               1./(2*p)*(j-1)*MM(i,j-2,e,A,B,C,a,b) +\
               1./(2*p)*e*MM(i,j-1,e-1,A,B,C,a,b)
    else:
        # decrement index i
        return PA*MM(i-1,j,e,A,B,C,a,b) +\
               1./(2*p)*(i-1)*MM(i-2,j,e,A,B,C,a,b) +\
               1./(2*p)*j*MM(i-1,j-1,e,A,B,C,a,b) +\
               1./(2*p)*e*MM(i-1,j,e-1,A,B,C,a,b)
def MMxyz(a,lmn1,A,b,lmn2,B,lmn3,C):
    ''' Evaluates a multipole-moment integral between two primitive Gaussians
    Returns a float.
    a: orbital exponent on Gaussian 'a' (e.g. alpha in the text)
    b: orbital exponent on Gaussian 'b' (e.g. beta in the text)
    lmn1: int tuple containing orbital angular momentum (e.g. (1,0,0))
          for Gaussian 'a'
    lmn2: int tuple containing orbital angular momentum for Gaussian 'b'
    lmn3: int tuple containing the multipole operator powers,
          e.g. (1,0,0) for the x component of the dipole moment
    A: list containing origin of Gaussian 'a', e.g. [1.0, 2.0, 0.0]
    B: list containing origin of Gaussian 'b'
    C: list containing origin of the multipole operator
    '''
    i,k,m = lmn1 # shell angular momentum on Gaussian 'a'
    j,l,n = lmn2 # shell angular momentum on Gaussian 'b'
    e,f,g = lmn3 # multipole operator powers per Cartesian direction
    MMx = MM(i,j,e,A[0],B[0],C[0],a,b) # X
    MMy = MM(k,l,f,A[1],B[1],C[1],a,b) # Y
    MMz = MM(m,n,g,A[2],B[2],C[2],a,b) # Z
    # BUG FIX: exponent must be 1.5, not 0.5.  Each 1D factor above omits
    # the Gaussian integral prefactor sqrt(pi/p), so the 3D integral needs
    # (pi/p)**(3/2); for lmn1 = lmn2 = lmn3 = (0,0,0) this must reduce to
    # the standard overlap S = (pi/p)**1.5 * K_AB.
    return np.power(np.pi/(a+b),1.5)*MMx*MMy*MMz
def multipole_moment(a,b,lmn,C):
    '''Evaluates a multipole-moment integral between two contracted Gaussians.
    Returns float.
    Arguments:
    a: contracted Gaussian 'a', BasisFunction object
    b: contracted Gaussian 'b', BasisFunction object
    lmn: int tuple with the multipole operator powers, e.g. (1,0,0)
    C: origin of the multipole operator
    '''
    total = 0.0
    # Contract over every pair of primitives, weighting each primitive
    # integral by both normalization factors and contraction coefficients.
    for norm_a, coef_a, exp_a in zip(a.norm, a.coefs, a.exps):
        for norm_b, coef_b, exp_b in zip(b.norm, b.coefs, b.exps):
            total += norm_a*norm_b*coef_a*coef_b*\
                     MMxyz(exp_a,a.shell,a.origin,
                           exp_b,b.shell,b.origin,lmn,C)
    return total
|
fhqgfss/MoHa
|
moha/system/integral/multipole_moment.py
|
Python
|
mit
| 2,994
|
[
"Gaussian"
] |
96872ba3c820fd8399288ccd1b8730ec90deb501b396620145a638eaa3d2792c
|
#! /usr/bin/env python
"""Sum per-atom surface areas from a .pqrta file, grouped by atom type.

Usage: GetTypeAreaPQRTA.py <file.pqrta> <gaff|amber|sybyl|bcc|gas>
"""
import os
import sys

# Recognized atom-type sets; the second command-line argument selects one.
gaff=['br', 'c', 'c1', 'c2', 'c3', 'ca', 'cc', 'cd', 'ce', 'cf', 'cg', 'cl', 'cp', 'cx', 'cy', 'f', 'h1', 'h2', 'h3', 'h4', 'h5', 'ha', 'hc', 'hn', 'ho', 'hs', 'i', 'n', 'n1', 'n2', 'n3', 'na', 'nb', 'nc', 'nd', 'ne', 'nf', 'nh', 'no', 'o', 'oh', 'os', 'p5', 's', 's4', 's6', 'sh', 'ss', 'sy']
amber=['Br', 'C', 'C*', 'CA', 'CB', 'CD', 'CK', 'Cl', 'CM', 'CN', 'CQ', 'CR', 'CT', 'CV', 'CW', 'CZ', 'DU', 'F', 'H', 'H1', 'H2', 'H3', 'H4', 'H5', 'HA', 'HC', 'HO', 'HS', 'I', 'N', 'N*', 'N1', 'N2', 'NA', 'NB', 'NC', 'NO', 'NT', 'O', 'O2', 'OH', 'OS', 'OW', 'P', 'S', 'SH', 'SO']
sybyl=['Br', 'C.1', 'C.2', 'C.3', 'C.ar', 'Cl', 'F', 'H', 'I', 'N.1', 'N.2', 'N.3', 'N.am', 'N.ar', 'N.pl3', 'O.2', 'O.3', 'P.3', 'S.2', 'S.3', 'S.o', 'S.o2']
bcc=['11', '12', '13', '14', '15', '16', '17', '21', '22', '23', '24', '25', '31', '32', '33', '42', '51', '52', '53', '71', '72', '73', '74', '91']
gas=['br', 'c1', 'c2', 'c3', 'cl', 'f', 'h', 'i', 'n1', 'n2', 'n3', 'na', 'o2', 'o3', 'os', 'p', 's2', 's3', 'so', 'so2']

if __name__ == '__main__':
    fname = sys.argv[1]
    fnamelist = os.path.splitext(fname)
    if fnamelist[1].lower() != '.pqrta':
        # FIX: parenthesized print runs on both Python 2 and Python 3; the
        # original `print '...'` statement is a SyntaxError on Python 3.
        print('Input should be pqrta format!')
        sys.exit(1)
    # Record identifier: everything before the first '_' in the base name.
    fnum = fnamelist[0].split('_')[0]
    typesets = {'gaff': gaff, 'amber': amber, 'sybyl': sybyl,
                'bcc': bcc, 'gas': gas}
    setname = sys.argv[2].lower()
    if setname not in typesets:
        print('Atom type set should be given, gaff, amber, sybyl, bcc or gas!')
        sys.exit(1)
    settype = typesets[setname]
    # Accumulated area per atom type, pre-seeded so every known type is
    # reported even when absent from the file.
    areatype = {}
    for t in settype:
        areatype[t] = 0.0
    fr = open(fname)
    for line in fr:
        # Each ATOM/HETATM record: second-to-last column is the atom type,
        # last column is the per-atom area.
        if line[:4] == 'ATOM' or line[:6] == 'HETATM':
            tmp = line.split()
            atype = tmp[-2].strip()
            aarea = float(tmp[-1].strip())
            # Types missing from the chosen set are appended so that no
            # area is silently dropped from the report.
            if atype not in settype:
                settype.append(atype)
            areatype[atype] = areatype.setdefault(atype, 0.0) + aarea
    fr.close()
    outputatype = fnum + ' '
    outputaarea = fnum + ' '
    for atype in settype:
        outputatype = outputatype + atype + ' '
        outputaarea = outputaarea + str(areatype[atype]) + ' '
    print(outputatype + ' : ' + outputaarea)
|
platinhom/DailyTools
|
scripts/GetTypeAreaPQRTA.py
|
Python
|
gpl-2.0
| 2,131
|
[
"Amber"
] |
05797e20fb00ce30afd4747112d312bcddace8dc3d8731f9116c150d08583d76
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
from MooseDocs import common
from MooseDocs.tree import tokens
class TestHasTokens(unittest.TestCase):
    """Exercise common.has_tokens against a minimal token tree."""

    def testBasic(self):
        """A named child token is located; an absent name is not."""
        parent = tokens.Token('', None)
        tokens.Token('Test', parent)
        self.assertTrue(common.has_tokens(parent, 'Test'))
        self.assertFalse(common.has_tokens(parent, 'Nope'))
if __name__ == '__main__':
    # Run the tests with per-test output when executed directly.
    unittest.main(verbosity=2)
|
harterj/moose
|
python/MooseDocs/test/common/test_has_tokens.py
|
Python
|
lgpl-2.1
| 724
|
[
"MOOSE"
] |
8517390942d78ba993f911d643ee145561b7c01816f2fd586b0985ee70d0fe0d
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from .alertsmanager import AlertsManager
from .alertstab import AlertsTab
|
marmyshev/item_title
|
openlp/plugins/alerts/lib/__init__.py
|
Python
|
gpl-2.0
| 2,181
|
[
"Brian"
] |
48f75020115fbee183ea9efdceadbc7c89a0b8b52cb197aa3c685555b00487f9
|
from pycalphad import Database, Model, variables as v
from pycalphad.tests.datasets import ROSE_TDB
from pycalphad import equilibrium
import numpy as np
# Phases and pure-element components to equilibrate from the ROSE test TDB.
my_phases_rose = ['TEST']
comps = ['H', 'HE', 'LI', 'BE', 'B', 'C', 'N', 'O', 'F']
comps = sorted(comps)
# Fixed T/P conditions; split composition evenly, leaving the first
# (dependent) component implicit.
conds = dict({v.T: 1000, v.P: 101325})
for comp in comps[1:]:
    conds[v.X(comp)] = 1.0/float(len(comps))
dbf = Database(ROSE_TDB)
# FIX: reuse the already-parsed `dbf` instead of re-parsing ROSE_TDB with a
# second Database(ROSE_TDB) call (dbf was previously assigned but unused).
eqx = equilibrium(dbf, comps, my_phases_rose, conds, calc_opts={'pdens': 10}, verbose=True)
|
richardotis/pycalphad-sandbox
|
advconds.py
|
Python
|
mit
| 498
|
[
"pycalphad"
] |
73306102dce9a73e7a2efc7bf2d586ff59d776d69652222f3f62afb92a0e0bc0
|
# This file is part of askbot_fedmsg.
# Copyright (C) 2013 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
""" Plugin to emit fedmsg messages from an askbot instance.
Enable this plugin by editing the ``settings.py`` file in your askbot
instance.
Find MIDDLEWARE_CLASSES and add 'askbot_fedmsg.NOOPMiddleware'
to the tuple like:
MIDDLEWARE_CLASSES = (
...
'askbot_fedmsg.NOOPMiddleware',
...
)
"""
import fedmsg
import functools
import socket
# NOTE: connecting to the fedmsg bus is a module-import side effect; the
# endpoint name is derived from this machine's short hostname.
hostname = socket.gethostname().split('.')[0]
fedmsg.init(name="askbot.%s" % hostname)
from django.core import serializers
import json
from django.dispatch import receiver
from askbot.models.signals import (
tags_updated,
edit_question_or_answer,
delete_question_or_answer,
flag_offensive,
remove_flag_offensive,
user_updated,
user_registered,
user_logged_in,
post_updated,
post_revision_published,
site_visited,
)
from askbot.deps import django_authopenid
def username(user):
    """ Return the user's username... *unless* that user logged in via FAS
    openid, in which case the FAS username is returned.
    """
    associations = django_authopenid.models.UserAssociation.objects.filter(
        user=user)
    for assoc in associations:
        openid = assoc.openid_url
        if 'id.fedoraproject.org' in openid:
            # Extract the FAS account name from the openid URL.
            return openid.split('://')[1].split('.')[0]
    # Otherwise
    return user.username
def mangle_kwargs(kwargs):
    """ Take kwargs as given to us by askbot and turn them into something that
    more closely resembles messages on the fedmsg bus.
    JSONify some django models.

    NOTE(review): block order below matters -- the 'post' and 'instance'
    blocks install kwargs['thread'], which the 'thread' block then flattens.
    Do not reorder.
    """
    # Drop the signal object django passes to every receiver.
    if 'signal' in kwargs:
        del kwargs['signal']
    # Collapse the various "acting user" kwargs into a single 'agent' name.
    user_keys = ['user', 'mark_by', 'delete_by', 'updated_by']
    for key in user_keys:
        if key in kwargs:
            kwargs['agent'] = username(kwargs[key])
            del kwargs[key]
    if 'newly_mentioned_users' in kwargs:
        kwargs['newly_mentioned_users'] = [
            username(user) for user in list(kwargs['newly_mentioned_users'])]
    if 'revision' in kwargs:
        # Flatten the revision model into a plain dict of selected fields.
        kwargs['agent'] = username(kwargs['revision'].author)
        kwargs['revision'] = dict(
            (key, getattr(kwargs['revision'], key)) for key in (
                'tagnames', 'text', 'title', 'summary', 'pk',
            ))
        kwargs['revision']['tagnames'] = \
            kwargs['revision']['tagnames'].split(' ')
    if 'post' in kwargs:
        # Remember the post's thread before flattening the post itself.
        kwargs['thread'] = kwargs['post'].thread
        kwargs['post'] = dict(
            (key, getattr(kwargs['post'], key)) for key in (
                'text', 'summary',
                'post_type', 'comment_count',
                'vote_up_count', 'vote_down_count', 'pk',
                'language_code',
            ))
    if 'instance' in kwargs:
        # Same treatment for signals that call the model 'instance'.
        kwargs['thread'] = kwargs['instance'].thread
        kwargs['instance'] = dict(
            (key, getattr(kwargs['instance'], key)) for key in (
                'text', 'summary',
                'post_type', 'comment_count',
                'vote_up_count', 'vote_down_count', 'pk',
                'language_code',
            ))
    if 'thread' in kwargs:
        # 'thread' may have been installed above or passed in directly.
        kwargs['topmost_post_id'] = kwargs['thread']._question_post().pk
        kwargs['thread'] = dict(
            (key, getattr(kwargs['thread'], key)) for key in (
                'tagnames', 'title', 'pk',
                'language_code'
            ))
        kwargs['thread']['tagnames'] = \
            kwargs['thread']['tagnames'].split(' ')
    if 'tags' in kwargs:
        kwargs['tags'] = [tag.name for tag in kwargs['tags']]
    return kwargs
def fedmsg_callback(sender, topic=None, **kwargs):
    """Normalize askbot signal kwargs and publish them on the fedmsg bus."""
    payload = mangle_kwargs(kwargs)
    fedmsg.publish(topic=topic, modname="askbot", msg=payload)
# Here is where we actually hook our callback up to askbot signals system
signals = {
    'tag.update': tags_updated,
    # BUG: the original dict literal listed 'post.edit' twice -- once mapped
    # to edit_question_or_answer and once to post_updated.  With duplicate
    # keys only the last value survives, so edit_question_or_answer was
    # silently never connected.  The dead entry is kept commented out (like
    # the other disabled topics) to make that explicit without changing
    # runtime behavior.
    #'post.edit': edit_question_or_answer,
    'post.edit': post_updated,
    'post.delete': delete_question_or_answer,
    'post.flag_offensive.add': flag_offensive,
    'post.flag_offensive.delete': remove_flag_offensive,
    #'user.update': user_updated,
    #'user.new': user_registered,
    #'post.revision.publish': post_revision_published,
    #'site.visit': site_visited,
}
for topic, signal in signals.items():
    # weak=False keeps our functools.partial receivers alive.
    signal.connect(functools.partial(fedmsg_callback, topic=topic), weak=False)
class NOOPMiddleware(object):
    """ Register our message-emitting plugin with django.
    Django middleware is supposed to provide a bunch of methods to act on the
    request/response pipeline. We ignore that and instead use middleware as
    a convenient vector to get ourselves into the askbot runtime environment.
    """
    # Intentionally empty: importing this module is what connects the signal
    # handlers above; the middleware itself does nothing per-request.
    pass
|
fedora-infra/askbot-fedmsg
|
askbot_fedmsg.py
|
Python
|
lgpl-2.1
| 5,465
|
[
"VisIt"
] |
c14e7ad1163e2c09055403db2690246b61423e71ab263a7cd3c59f8228340046
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import spectral_grad # pylint: disable=unused-import
from tensorflow.python.platform import tf_logging as logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000


def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
  """Converts an IndexedSlices object `value` to a Tensor.

  NOTE(mrry): This function is potentially expensive.

  Args:
    value: An ops.IndexedSlices object.
    dtype: The dtype of the Tensor to be returned.
    name: Optional name to use for the returned Tensor.
    as_ref: True if a ref is requested.

  Returns:
    A dense Tensor representing the values in the given IndexedSlices.

  Raises:
    ValueError: If the IndexedSlices does not have the same dtype.
  """
  # as_ref is accepted only for conversion-function signature compatibility.
  _ = as_ref
  if dtype and not dtype.is_compatible_with(value.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
        (dtype.name, value.dtype.name))
  if value.dense_shape is None:
    raise ValueError(
        "Tensor conversion requested for IndexedSlices without dense_shape: %s"
        % str(value))
  # TODO(mrry): Consider adding static shape information to
  # IndexedSlices, to avoid using numpy here.
  dense_shape_value = tensor_util.constant_value(value.dense_shape)
  if dense_shape_value is not None:
    num_elements = np.prod(dense_shape_value)
    if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
      warnings.warn(
          "Converting sparse IndexedSlices to a dense Tensor with %d elements. "
          "This may consume a large amount of memory." % num_elements)
  else:
    warnings.warn(
        "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
        "This may consume a large amount of memory.")
  # unsorted_segment_sum densifies the slices: rows with duplicate indices
  # are summed into the corresponding row of the dense result.
  return math_ops.unsorted_segment_sum(
      value.values, value.indices, value.dense_shape[0], name=name)


# Make IndexedSlices implicitly convertible to Tensor wherever ops accept one.
ops.register_tensor_conversion_function(ops.IndexedSlices,
                                        _IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
  """Initialize the pending count for ops between two lists of Operations.

  'pending_count[op._id]' indicates the number of backprop inputs
  to this operation.

  Args:
    graph: a Graph.
    to_ops: list of Operations.
    from_ops: list of Operations.
    colocate_gradients_with_ops: Python bool.  See docstring of gradients().

  Returns:
    A tuple containing: (1) a list of integers indexed by operation id,
    indicating the number of backprop inputs to this operation, and (2)
    a ControlFlowState object which is not None if the ops between from_ops
    and to_ops contain control flow loops.
  """
  # Mark reachable ops from from_ops.
  # Seeding to_ops as "reached" guarantees the backward walk below can start.
  reached_ops = [False] * (graph._last_id + 1)
  for op in to_ops:
    reached_ops[op._id] = True
  _MarkReachedOps(from_ops, reached_ops)
  # Mark between ops: walk backwards from to_ops, keeping only ops that the
  # forward pass from from_ops could reach.
  between_ops = [False] * (graph._last_id + 1)
  between_op_list = []
  queue = collections.deque()
  queue.extend(to_ops)
  while queue:
    op = queue.popleft()
    # We are interested in this op.
    if reached_ops[op._id]:
      between_ops[op._id] = True
      between_op_list.append(op)
      # Clear the boolean so we won't add the inputs again.
      reached_ops[op._id] = False
      for inp in op.inputs:
        queue.append(inp.op)
  # 'loop_state' is None if there are no while loops.
  loop_state = control_flow_ops.MaybeCreateControlFlowState(
      between_op_list, between_ops, colocate_gradients_with_ops)
  # Initialize pending count for between ops: each in-subgraph consumer of an
  # op contributes one expected backprop input.
  pending_count = [0] * (graph._last_id + 1)
  for op in between_op_list:
    for x in op.inputs:
      if between_ops[x.op._id]:
        pending_count[x.op._id] += 1
  return pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
  """Fill in default values for grad_ys.

  Args:
    grad_ys: List of gradients, can contain None.
    ys: List of tensors.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.

  Returns:
    A list of gradients to use, without None.

  Raises:
    ValueError: If sizes of gradients and inputs don't match
    TypeError: If type of any gradient is not valid for its input.
  """
  if len(grad_ys) != len(ys):
    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
  for i in xrange(len(grad_ys)):
    grad_y = grad_ys[i]
    y = ys[i]
    if grad_y is None:
      # Default initial gradient: a tensor of ones shaped like y.  Complex
      # ys must supply an explicit grad_y, since "1" is ambiguous for them.
      if y.dtype.is_complex:
        raise TypeError(
            "Gradients of complex tensors must set grad_ys (y.dtype = %r)" %
            y.dtype)
      with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
        grad_ys[i] = array_ops.fill(
            array_ops.shape(y), constant_op.constant(
                1, dtype=y.dtype))
      continue
    # An explicit grad_y must be dtype-compatible with its y.
    if y.dtype.is_floating or y.dtype.is_integer:
      if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
        raise TypeError("Gradient type %s generated for real or "
                        "integer-valued tensor %s with type %s must be "
                        "real or integer" %
                        (dtypes.as_dtype(grad_y.dtype).name, y,
                         dtypes.as_dtype(y.dtype).name))
    elif y.dtype.is_complex:
      if not grad_y.dtype.is_complex:
        raise TypeError("Gradient type %s generated for complex-valued "
                        "tensor %s with type %s must be real" %
                        (dtypes.as_dtype(grad_y.dtype).name, y,
                         dtypes.as_dtype(y.dtype).name))
    else:
      raise TypeError("Tensor %s with type %s must be numeric "
                      "to obtain a default gradient" %
                      (y, dtypes.as_dtype(y.dtype).name))
  return grad_ys
def _IsTrainable(tensor):
  """Return True iff `tensor` has a floating-point or complex base dtype."""
  base_dtype = dtypes.as_dtype(tensor.dtype).base_dtype
  return base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
                        dtypes.complex64, dtypes.complex128)
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
op: Operation for which the gradients where generated.
Raises:
ValueError: if sizes of gradients and inputs don't match.
TypeError: if type of any gradient is not valid for its input.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
for i in xrange(len(grads)):
grad = grads[i]
inp = op.inputs[i]
if grad is None:
continue
if grad.dtype.is_floating:
if not inp.dtype.is_floating:
raise TypeError("Gradient type %s generated for real-valued op %s "
"with type %s must be real" %
(dtypes.as_dtype(grad.dtype).name, op.node_def,
dtypes.as_dtype(inp.dtype).name))
elif grad.dtype.is_complex:
if not inp.dtype.is_complex:
raise TypeError("Gradient type %s generated for complex-valued op %s"
" with type %s must be complex" %
(dtypes.as_dtype(grad.dtype).name, op.node_def,
dtypes.as_dtype(inp.dtype).name))
else:
raise TypeError("Gradient type %s generated for op %s "
"with type %s must be either real or complex" %
(dtypes.as_dtype(grad.dtype).name, op.node_def,
dtypes.as_dtype(inp.dtype).name))
def _StopOps(from_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
Args:
from_ops: list of Operations.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, colocate_gradients_with_ops):
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops.colocate_with(op):
yield
else:
yield
def _SymGrad(op, out_grads):
  """Backprop through a function call node op given its outputs' gradients."""
  # SymbolicGradient inputs: the original call's inputs followed by the
  # gradients of its outputs; it returns one gradient per original input.
  f_in = [x for x in op.inputs] + out_grads
  f_types = [x.dtype for x in op.inputs]
  # Forward the called function's name and attrs so the gradient is taken
  # of the correct function instantiation.
  f = attr_value_pb2.NameAttrList()
  f.name = op.type
  for k in op.node_def.attr:
    f.attr[k].CopyFrom(op.node_def.attr[k])
  # pylint: disable=protected-access
  in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
  # pylint: enable=protected-access
  return in_grads
def _MaybeCompile(scope, op, func, grad_fn):
"""Compile the calculation in grad_fn if op was marked as compiled."""
scope = scope.rstrip("/").replace("/", "_")
if func is not None:
xla_compile = func.definition.attr["_XlaCompile"].b
xla_scope = func.definition.attr["_XlaScope"].s.decode()
else:
try:
xla_compile = op.get_attr("_XlaCompile")
xla_scope = op.get_attr("_XlaScope").decode()
except ValueError:
return grad_fn() # Exit early
if not xla_compile:
return grad_fn() # Exit early
attrs = {"_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
"_XlaScope": attr_value_pb2.AttrValue(
s=("%s_grad_%s" % (xla_scope, scope)).encode())}
with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access
return grad_fn()
def gradients(ys,
              xs,
              grad_ys=None,
              name="gradients",
              colocate_gradients_with_ops=False,
              gate_gradients=False,
              aggregation_method=None):
  """Constructs symbolic partial derivatives of sum of `ys` w.r.t. x in `xs`.

  `ys` and `xs` are each a `Tensor` or a list of tensors.  `grad_ys`
  is a list of `Tensor`, holding the gradients received by the
  `ys`. The list must be the same length as `ys`.

  `gradients()` adds ops to the graph to output the partial
  derivatives of `ys` with respect to `xs`.  It returns a list of
  `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
  for y in `ys`.

  `grad_ys` is a list of tensors of the same length as `ys` that holds
  the initial gradients for each y in `ys`.  When `grad_ys` is None,
  we fill in a tensor of '1's of the shape of y for each y in `ys`.  A
  user can provide their own initial `grad_ys` to compute the
  derivatives using a different initial gradient for each y (e.g., if
  one wanted to weight the gradient differently for each value in
  each y).

  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    grad_ys: Optional. A `Tensor` or list of tensors the same size as
      `ys` and holding the gradients computed for each y in `ys`.
    name: Optional name to use for grouping all the gradient ops together.
      defaults to 'gradients'.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    gate_gradients: If True, add a tuple around the gradients returned
      for an operations.  This avoids some race conditions.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.

  Returns:
    A list of `sum(dy/dx)` for each x in `xs`.

  Raises:
    LookupError: if one of the operations between `x` and `y` does not
      have a registered gradient function.
    ValueError: if the arguments are invalid.
  """
  ys = _AsList(ys)
  xs = _AsList(xs)
  if grad_ys is None:
    grad_ys = [None] * len(ys)
  else:
    grad_ys = _AsList(grad_ys)

  with ops.name_scope(name, "gradients", ys + xs + grad_ys) as grad_scope:
    ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
    # Resource variables are differentiated through their underlying handle.
    xs = [x.handle if isinstance(x, resource_variable_ops.ResourceVariable)
          else x
          for x in xs]
    xs = ops.internal_convert_n_to_tensor_or_indexed_slices(xs, name="x",
                                                            as_ref=True)
    grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)

    # The approach we take here is as follows: Create a list of all ops in the
    # subgraph between the ys and xs.  Visit these ops in reverse order of ids
    # to ensure that when we visit an op the gradients w.r.t its outputs have
    # been collected.  Then aggregate these gradients if needed, call the op's
    # gradient function, and add the generated gradients to the gradients for
    # its input.

    # Initialize the pending count for ops in the connected subgraph from ys
    # to the xs.
    to_ops = [t.op for t in ys]
    from_ops = [t.op for t in xs]
    pending_count, loop_state = _PendingCount(ops.get_default_graph(), to_ops,
                                              from_ops,
                                              colocate_gradients_with_ops)

    # Iterate over the collected ops.
    #
    # grads: op => list of gradients received on each output endpoint of the
    # op.  The gradients for each endpoint are initially collected as a list.
    # When it is time to call the op's gradient function, for each endpoint we
    # aggregate the list of received gradients into a Add() Operation if there
    # is more than one.
    grads = {}

    # Add the initial gradients for the ys.
    for y, grad_y in zip(ys, grad_ys):
      _SetGrad(grads, y, grad_y)

    # Initialize queue with to_ops.
    queue = collections.deque()
    # Add the ops in 'to_ops' into the queue.
    to_ops_set = set()
    for op in to_ops:
      # 'ready' handles the case where one output gradient relies on
      # another output's gradient.
      # pylint: disable=protected-access
      ready = (pending_count[op._id] == 0)
      if ready and op._id not in to_ops_set:
        to_ops_set.add(op._id)
        queue.append(op)
      # pylint: enable=protected-access

    if loop_state:
      # Seed zero gradients for loop exits that would otherwise never become
      # ready, so loops with unused exits still backprop correctly.
      loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
      for y in loop_exits:
        if _IsTrainable(y):
          _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
          queue.append(y.op)

    # The set of 'from_ops'.
    stop_ops = _StopOps(from_ops, pending_count)
    while queue:
      # generate gradient subgraph for op.
      op = queue.popleft()
      with _maybe_colocate_with(op, colocate_gradients_with_ops):
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=True)
        out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=True)

        grad_fn = None
        # pylint: disable=protected-access
        func_call = None
        is_func_call = ops.get_default_graph()._is_function(op.type)
        has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
        if has_out_grads and (op._id not in stop_ops):
          if is_func_call:
            func_call = ops.get_default_graph()._get_function(op.type)
            grad_fn = func_call.python_grad_func
            # pylint: enable=protected-access
          else:
            # A grad_fn must be defined, either as a function or as None
            # for ops that do not have gradients.
            try:
              grad_fn = ops.get_gradient_function(op)
            except LookupError:
              raise LookupError(
                  "No gradient defined for operation '%s' (op type: %s)" %
                  (op.name, op.type))
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=False)
        if (grad_fn or is_func_call) and has_out_grads:
          # NOTE: If _AggregatedGrads didn't compute a value for the i'th
          # output, it means that the cost does not depend on output[i],
          # therefore dC/doutput[i] is 0.
          for i, out_grad in enumerate(out_grads):
            if (not isinstance(out_grad, ops.Tensor) and
                not out_grad) and _IsTrainable(op.outputs[i]):
              # Only floating-point outputs get a zero gradient. Gradient
              # functions should ignore the gradient for other outputs.
              # TODO(apassos) gradients of resource handles might be an
              # issue here because of zeros.
              if loop_state:
                out_grads[i] = loop_state.ZerosLike(op, i)
              else:
                out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
          with ops.name_scope(op.name + "_grad"):
            # pylint: disable=protected-access
            with ops.get_default_graph()._original_op(op):
              # pylint: enable=protected-access
              if grad_fn:
                # If grad_fn was found, do not use SymbolicGradient even for
                # functions.
                in_grads = _MaybeCompile(
                    grad_scope, op, func_call, lambda: grad_fn(op, *out_grads))
              else:
                # For function call ops, we add a 'SymbolicGradient'
                # node to the graph to compute gradients.
                in_grads = _MaybeCompile(
                    grad_scope, op, func_call, lambda: _SymGrad(op, out_grads))
              in_grads = _AsList(in_grads)
              _VerifyGeneratedGradients(in_grads, op)
              if gate_gradients and len(
                  [x for x in in_grads if x is not None]) > 1:
                in_grads = control_flow_ops.tuple(in_grads)
          _LogOpGradients(op, out_grads, in_grads)
        else:
          # If no grad_fn is defined or none of out_grads is available,
          # just propagate a list of None backwards.
          in_grads = [None] * len(op.inputs)
        # Propagate each input gradient to the producer of that input.
        for t_in, in_grad in zip(op.inputs, in_grads):
          if in_grad is not None:
            if (isinstance(in_grad, ops.Tensor) and
                t_in.dtype != dtypes.resource):
              in_grad.set_shape(t_in.get_shape())
            _SetGrad(grads, t_in, in_grad)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=False)

      # Update pending count for the inputs of op and enqueue ready ops.
      _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)

    if loop_state:
      loop_state.PostProcessing()
  return [_GetGrad(grads, x) for x in xs]
def _HasAnyNotNoneGrads(grads, op):
  """Return true iff op has real gradient."""
  for out_grad in _GetGrads(grads, op):
    # A single Tensor/IndexedSlices gradient counts as real.
    if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
      return True
    # A non-empty sequence counts as real if any entry is not None.
    if out_grad and isinstance(out_grad, collections.Sequence):
      if any(g is not None for g in out_grad):
        return True
  return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
  """Update pending count for the inputs of op and enqueue ready ops.

  Args:
    grads: The map of memoized gradients (op -> per-output gradient lists).
    op: The op whose input-producing ops should be updated.
    queue: Worklist of ops that are ready to have their gradients computed.
    pending_count: Map from op id to the number of its outputs whose
      gradients have not yet been processed.
    loop_state: ControlFlowState for while loops, or None when the graph
      contains no while loops.
  """
  for x in op.inputs:
    # pylint: disable=protected-access
    pending_count[x.op._id] -= 1
    ready = (pending_count[x.op._id] == 0)
    if loop_state and not ready:
      # Inside a while loop, a loop switch op may be enqueued before all of
      # its pending outputs have been seen.
      ready = (pending_count[x.op._id] > 0 and
               control_flow_ops.IsLoopSwitch(x.op))
    # pylint: enable=protected-access
    if ready:
      if control_flow_ops.IsLoopExit(x.op):
        # if x is an exit without real gradient, defer processing them.
        grad_state = loop_state.GetGradState(x.op, before=False)
        grad_state.deferred_exits.append(x)
        grad_state.pending_exits_count -= 1
        if grad_state.pending_exits_count == 0:
          # We now have all the exits so process them.
          has_real_grad = False
          for y in grad_state.deferred_exits:
            if _HasAnyNotNoneGrads(grads, y.op):
              has_real_grad = True
              queue.append(y.op)
            else:
              grad_state.unused_exits.append(y)
          if has_real_grad:
            # For an unused exit, if it has floating-point outputs, backprop
            # a zero gradient. Otherwise, just ignore it.
            for y in grad_state.unused_exits:
              if _IsTrainable(y):
                _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
              queue.append(y.op)
          else:
            # All exits are "unused" so use None as gradient.
            for y in grad_state.unused_exits:
              queue.append(y.op)
      else:
        # Ordinary (non-loop-exit) op: schedule it directly.
        queue.append(x.op)
def _SetGrad(grads, t, grad):
  """Sets gradient "grad" in "grads" for tensor "t"."""
  op = t.op
  if not grads.get(op):
    # First gradient recorded for this op: one accumulation list per output.
    grads[op] = [[] for _ in xrange(len(op.outputs))]
  op_grads = grads[op]
  slot = op_grads[t.value_index]
  if isinstance(slot, list):
    slot.append(grad)
  else:
    # Loop switch outputs hold a single gradient instead of a list.
    assert control_flow_ops.IsLoopSwitch(op)
    op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
return None
t_grad = op_grads[t.value_index]
assert not isinstance(t_grad, list), (
"gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
  """Gets all gradients for op."""
  try:
    return grads[op]
  except KeyError:
    # No gradients recorded yet: empty accumulation list per output.
    return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
  """Flattens `IndexedSlices` whose values are themselves `IndexedSlices`."""
  assert isinstance(grad, ops.IndexedSlices)
  if isinstance(grad.values, ops.Tensor):
    # Already flat.
    return grad
  assert isinstance(grad.values, ops.IndexedSlices)
  # Recursively flatten the inner slices, then compose the index gathers.
  g = _HandleNestedIndexedSlices(grad.values)
  return ops.IndexedSlices(g.values,
                           array_ops.gather(grad.indices, g.indices),
                           g.dense_shape)
def _AccumulatorShape(inputs):
  """Returns the merged static shape of all `Tensor`s in `inputs`."""
  shape = tensor_shape.unknown_shape()
  for tensor in inputs:
    # Non-Tensor entries (e.g. IndexedSlices) contribute no shape info.
    if isinstance(tensor, ops.Tensor):
      shape = shape.merge_with(tensor.get_shape())
  return shape
def _LogOpGradients(op, out_grads, in_grads):
  """Log the in and out grads of an op."""
  logging.vlog(1, "Gradient for '" + op.name + "'")

  def _FilterGrad(x):
    # Keep non-None grads; empty sequences are filtered out.
    if x is None:
      return False
    return bool(x) if isinstance(x, (list, tuple)) else True

  logging.vlog(1, "  in  --> %s",
               ", ".join(x.name for x in out_grads if _FilterGrad(x)))
  logging.vlog(1, "  out --> %s",
               ", ".join(x.name for x in in_grads if _FilterGrad(x)))
def _MultiDeviceAddN(tensor_list):
  """Adds tensors from potentially multiple devices.

  Args:
    tensor_list: Tensors to sum; they may live on different devices.

  Returns:
    A single Tensor: the sum of all entries of `tensor_list`, computed by
    first summing per-device and then summing the per-device partial sums.
  """
  # Basic function structure comes from control_flow_ops.group().
  # Sort tensors according to their devices.
  # defaultdict(list) is the idiomatic form of defaultdict(lambda: []).
  tensors_on_device = collections.defaultdict(list)
  for tensor in tensor_list:
    tensors_on_device[tensor.device].append(tensor)

  # For each device, add the tensors on that device first.
  # Then gather the partial sums from multiple devices.
  # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
  # E.g., aggregate per GPU, then per task, and so on.
  summands = []

  def DeviceKey(dev):
    # Sorting requires comparable keys; map a missing device to "".
    return "" if dev is None else dev

  for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
    tensors = tensors_on_device[dev]
    # Place each per-device partial sum next to its inputs.
    with ops.colocate_with(tensors[0].op, ignore_existing=True):
      summands.append(math_ops.add_n(tensors))

  return math_ops.add_n(summands)
class AggregationMethod(object):
  """A class listing aggregation methods used to combine gradients.

  Computing partial derivatives can require aggregating gradient
  contributions. This class lists the various methods that can
  be used to combine gradients in the graph:

  * `ADD_N`: All of the gradient terms are summed as part of one
    operation using the "AddN" op. It has the property that all
    gradients must be ready before any aggregation is performed.
  * `DEFAULT`: The system-chosen default aggregation method.
  """
  ADD_N = 0
  DEFAULT = ADD_N
  # The following are experimental and may not be supported in future releases.
  # EXPERIMENTAL_TREE: pairwise summation; EXPERIMENTAL_ACCUMULATE_N: uses
  # AccumulateN when shapes are fully defined (see _AggregatedGrads).
  EXPERIMENTAL_TREE = 1
  EXPERIMENTAL_ACCUMULATE_N = 2
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
  """Get the aggregated gradients for op.

  Args:
    grads: The map of memoized gradients.
    op: The op to get gradients for.
    loop_state: An object for maintaining the state of the while loops in the
                graph. It is of type ControlFlowState. None if the graph
                contains no while loops.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.

  Returns:
    A list of gradients, one per each output of `op`. If the gradients
      for a particular output is a list, this function aggregates it
      before returning.

  Raises:
    TypeError: if the incoming grads are not Tensors or IndexedSlices.
    ValueError: if the arguments are invalid.
  """
  if aggregation_method is None:
    aggregation_method = AggregationMethod.DEFAULT
  if aggregation_method not in [
      AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
      AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
  ]:
    raise ValueError("Invalid aggregation_method specified %s." %
                     aggregation_method)
  out_grads = _GetGrads(grads, op)
  for i, out_grad in enumerate(out_grads):
    if loop_state:
      # Loop switch outputs carry a single gradient, not a list; skip them.
      if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
        assert control_flow_ops.IsLoopSwitch(op)
        continue
    # Grads have to be Tensors or IndexedSlices
    if (isinstance(out_grad, collections.Sequence) and not all([
        isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad
        if g is not None
    ])):
      raise TypeError("gradients have to be either all Tensors "
                      "or all IndexedSlices")
    # Aggregate multiple gradients, and convert [] to None.
    if out_grad:
      if len(out_grad) < 2:
        # Single contribution: no aggregation needed.
        used = "nop"
        out_grads[i] = out_grad[0]
      elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
        tensor_shape = _AccumulatorShape(out_grad)
        if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
            and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
          # The benefit of using AccumulateN is that its inputs can be combined
          # in any order and this can allow the expression to be evaluated with
          # a smaller memory footprint.  When used with gpu_allocator_retry,
          # it is possible to compute a sum of terms which are much larger than
          # total GPU memory.
          # AccumulateN can currently only be used if we know the shape for
          # an accumulator variable.  If this is not known, or if we only have
          # 2 grads then we fall through to the "tree" case below.
          used = "accumulate_n"
          out_grads[i] = math_ops.accumulate_n(out_grad)
        elif aggregation_method in [
            AggregationMethod.EXPERIMENTAL_TREE,
            AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
        ]:
          # Aggregate all gradients by doing pairwise sums: this may
          # reduce performance, but it can improve memory because the
          # gradients can be released earlier.
          #
          # TODO(vrv): Consider replacing this with a version of
          # tf.AddN() that eagerly frees its inputs as soon as they are
          # ready, so the order of this tree does not become a problem.
          used = "tree"
          with ops.name_scope(op.name + "_gradient_sum"):
            running_sum = out_grad[0]
            for grad in out_grad[1:]:
              running_sum = math_ops.add_n([running_sum, grad])
            out_grads[i] = running_sum
        else:
          # Default ADD_N path: one AddN per device, then a final AddN.
          used = "add_n"
          out_grads[i] = _MultiDeviceAddN(out_grad)
        logging.vlog(2, "  _AggregatedGrads %d x %s using %s",
                     len(out_grad), tensor_shape, used)
      else:
        # At least one IndexedSlices gradient: concatenate values/indices.
        # pylint: disable=protected-access
        out_grad = math_ops._as_indexed_slices_list(
            [g for g in out_grad if g is not None])
        # pylint: enable=protected-access
        out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
        # Form IndexedSlices out of the concatenated values and
        # indices.
        out_grads[i] = ops.IndexedSlices(
            array_ops.concat([x.values for x in out_grad], 0),
            array_ops.concat([x.indices for x in out_grad], 0),
            out_grad[0].dense_shape)
    else:  # not out_grad
      # out_grads[i] is [], thus its aggregation is simply None.
      out_grads[i] = None
  return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
  """Multiply the Hessian of `ys` wrt `xs` by `v`.

  An efficient backprop-like construction: the Hessian is usually too
  large to materialise, but a Hessian-vector product costs only the same
  big-O as backprop. Implicit Hessian-vector products are the main
  practical, scalable way of using second derivatives with neural
  networks (e.g. Krylov subspaces, approximate conjugate gradient).

  Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
  x, v)` evaluates to the same values as (A + A.T) `v`.

  Args:
    ys: A scalar value, or a tensor or list of tensors to be summed to
        yield a scalar.
    xs: A list of tensors that we should construct the Hessian over.
    v: A list of tensors, with the same shapes as xs, that we want to
       multiply by the Hessian.

  Returns:
    A list of tensors (or if the list would be length 1, a single tensor)
    containing the product between the Hessian and `v`.

  Raises:
    ValueError: `xs` and `v` have different length.
  """
  # Validate the input
  if len(v) != len(xs):
    raise ValueError("xs and v must have the same length.")

  # First backprop
  grads = gradients(ys, xs)

  assert len(grads) == len(xs)
  # Dot the first-order gradients with v (held constant via stop_gradient).
  elemwise_products = [
      math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
      for grad_elem, v_elem in zip(grads, v)
      if grad_elem is not None
  ]

  # Second backprop
  return gradients(elemwise_products, xs)
def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
             gate_gradients=False, aggregation_method=None):
  """Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.

  `hessians()` adds ops to the graph to output the Hessian matrix of `ys`
  with respect to `xs`.  It returns a list of `Tensor` of length `len(xs)`
  where each tensor is the Hessian of `sum(ys)`. This function currently
  only supports evaluating the Hessian with respect to (a list of) one-
  dimensional tensors.

  The Hessian is a matrix of second-order partial derivatives of a scalar
  tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).

  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    name: Optional name to use for grouping all the gradient ops together.
      defaults to 'hessians'.
    colocate_gradients_with_ops: See `gradients()` documentation for details.
    gate_gradients: See `gradients()` documentation for details.
    aggregation_method: See `gradients()` documentation for details.

  Returns:
    A list of Hessian matrices of `sum(y)` for each `x` in `xs`.

  Raises:
    LookupError: if one of the operations between `xs` and `ys` does not
      have a registered gradient function.
    ValueError: if the arguments are invalid or not supported. Currently,
      this function only supports one-dimensional `x` in `xs`.
  """
  xs = _AsList(xs)
  kwargs = {
      'colocate_gradients_with_ops': colocate_gradients_with_ops,
      'gate_gradients': gate_gradients,
      'aggregation_method': aggregation_method
  }
  # One Hessian matrix per x in xs.
  hessians = []
  for i, x in enumerate(xs):
    # Only rank-1 tensors with a known rank are supported.
    ndims = x.get_shape().ndims
    if ndims is None:
      raise ValueError('Cannot compute Hessian because the dimensionality of '
                       'element number %d of `xs` cannot be determined' % i)
    elif ndims != 1:
      raise ValueError('Computing hessians is currently only supported for '
                       'one-dimensional tensors. Element number %d of `xs` has '
                       '%d dimensions.' % (i, ndims))
    with ops.name_scope(name + '_first_derivative'):
      # Gradient of sum(ys) w.r.t. x, unpacked so each element can be
      # differentiated again.
      grad_elems = array_ops.unstack(gradients(ys, x, **kwargs)[0])
    with ops.name_scope(name + '_second_derivative'):
      # One row of the Hessian per first-derivative element.
      rows = [gradients(elem, x, **kwargs)[0] for elem in grad_elems]
      hessians.append(array_ops.stack(rows, name=name))
  return hessians
|
tntnatbry/tensorflow
|
tensorflow/python/ops/gradients_impl.py
|
Python
|
apache-2.0
| 37,291
|
[
"VisIt"
] |
61b7f53f16f660a2ec01acc15db2b9c97092b50026e8b4f3ea8a0026e4903601
|
"""
Examine the distribution of diffraction spot intensities.
This module defines a class IntensityDist, with several methods for exploring
the distribution of measured spot intensities in an X-ray diffraction
experiment. The user may wish to use this information to inform decisions
regarding the error model employed in analysing the data. Data are passed in
as an unmerged MTZ file (see http://www.ccp4.ac.uk/html/mtzformat.html) and the
resulting IntensityDist instance contains the pertinent columns of data, along
with normal order statistic medians of the z-scores of the intensities, for
constructing a normal probability plot (See
https://www.itl.nist.gov/div898/handbook/eda/section3/normprpl.htm).
"""
from __future__ import annotations
import logging
import scipy.stats
from cctbx import miller
from dxtbx.model import ExperimentList
from dials.array_family import flex
log = logging.getLogger("dials.util.intensity_explorer")
class IntensityDist:
    def __init__(
        self,
        rtable,
        elist,
        calculate_variances=False,
        keep_singles=False,
        uncertainty="sigma",
        outfile=None,
    ):
        """
        Generate z-scores and a normal probability plot from a DIALS
        reflection_table and a dxtbx ExperimentList, containing the observations
        and the corresponding experiments, respectively.

        :param rtable: A reflection table object, containing at least the columns
          * ``miller_index``
          * ``intensity.sum.value``
          * ``intensity.sum.variance``
          * ``xyzobs.px.value``
        :type rtable: dials.array_family_flex_ext.reflection_table
        :param elist: A corresponding experiment list.
        :type elist: dxtbx.model.ExperimentList
        :param calculate_variances: Choose whether to calculate weighted
        aggregate variances.  Doing so incurs a performance penalty.
        Defaults to False.
        :type calculate_variances: bool
        :param keep_singles: Choose whether to keep multiplicity-1 reflections.
        Defaults to False.
        :type keep_singles: bool
        :param uncertainty: Measure of spread to use in normalising the
        z-scores, i.e. z = (I - <I>) / uncertainty.
        Possible values for uncertainty:
        * 'sigma':    Use measured sigma values;
        * 'stddev':   Use sample standard deviations calculated as
          square-root of unbiased weighted sample variances
          of symmetry-equivalent reflection intensities;
        Defaults to 'sigma'.
        :type uncertainty: str
        :param outfile: Filename root for output PNG plots.
        Defaults to None.
        :type outfile: str
        """
        if not isinstance(rtable, flex.reflection_table) or not isinstance(
            elist, ExperimentList
        ):
            raise TypeError(
                "Must be called with a reflection table and an experiment list."
            )

        rtable = rtable.copy()
        # Discard unindexed reflections (only necessary because of
        # https://github.com/dials/dials/issues/615 —
        # TODO remove the line below when issue #615 is fixed).
        rtable.del_selected(rtable["id"] == -1)
        rtable["miller_index.asu"] = rtable["miller_index"]

        # Divide reflections by the space group to which they have been indexed.
        self.rtables = {
            expt.crystal.get_space_group().make_tidy(): flex.reflection_table()
            for expt in elist
        }
        for expt, sel in rtable.iterate_experiments_and_indices(elist):
            sg = expt.crystal.get_space_group().make_tidy()
            self.rtables[sg].extend(rtable.select(sel))
        # Map Miller indices to asymmetric unit.
        for space_group, rtable in self.rtables.items():
            # TODO Handle anomalous flag sensibly. Currently assumes not anomalous.
            miller.map_to_asu(space_group.type(), False, rtable["miller_index.asu"])

        # Calculate normal probability plot data.
        self._multiplicity_mean_error_stddev(
            calculate_variances=calculate_variances, keep_singles=keep_singles
        )
        self._make_z(uncertainty)
        self._probplot_data()

        # Concatenate the per-space-group tables into one for convenience.
        self.rtable = flex.reflection_table()
        for rtable in self.rtables.values():
            self.rtable.extend(rtable)

        if not outfile:
            outfile = ""
        self.outfile = outfile

    def _multiplicity_mean_error_stddev(
        self, calculate_variances=False, keep_singles=False
    ):
        """
        Calculate aggregate properties of grouped symmetry-equivalent reflections.

        Populate the reflection table of observations with the following
        properties:
          * ``multiplicity`` — Multiplicity of observations of a given reflection
          in the asymmetric unit;
          :type: `dials.array_family_flex_ext.int` array
          * ``intensity.mean.value`` — Mean of symmetry-equivalent reflections,
          weighted by measurement error;
          :type: `dials.array_family_flex_ext.double` array
          * ``intensity.mean.std_error`` — Standard error on the weighted mean;
          :type: `dials.array_family_flex_ext.double` array
          * (optional) ``intensity.mean.variance`` — variance of
          symmetry-equivalent reflections, weighted by measurement error;
          :type: `dials.array_family_flex_ext.double` array

        :param calculate_variances: Elect whether to calculate the weighted
        variances.  Defaults to False, to spare an expensive computation.
        :type calculate_variances: bool
        :param keep_singles: Choose whether to keep single-multiplicity
        reflections.
        :type keep_singles: bool
        """
        for key, rtable in self.rtables.items():
            # Sort the reflection table for speedier iteration.
            rtable.sort("miller_index.asu")
            # Record the positions of any multiplicity-1 reflections.
            if not keep_singles:
                singles = flex.size_t()
            # Record the multiplicities.
            multiplicity = flex.int()
            # For weighted averaging.
            weights = 1 / rtable["intensity.sum.variance"]
            sum_weights = flex.double()
            if calculate_variances:
                sum_square_weights = flex.double()
            # Calculate the weighted mean intensities.
            i_means = flex.double()
            # Calculate the standard deviations from unbiased weighted variances.
            variances = flex.double()

            # Iterate over the reflections, grouping by equivalent Miller index,
            # to calculate multiplicities, weighted mean intensities, etc..
            # Some time can be saved by only calculating variances if necessary.
            # Initial values:
            prev_index = None
            count = 1
            # The following will be set during loop iteration
            i_sum, sum_weight, sum_square_weight = None, None, None
            # One big loop through the entire reflection table:
            for j in range(rtable.size()):
                index = rtable["miller_index.asu"][j]
                weight = weights[j]
                # Aggregate within a symmetry-equivalent group of reflections:
                if index == prev_index:
                    count += 1
                    i_sum += weight * rtable["intensity.sum.value"][j]
                    sum_weight += weight
                    if calculate_variances:
                        sum_square_weight += weight * weight
                # Record the aggregated values for the group:
                elif prev_index:
                    if count == 1 and not keep_singles:
                        singles.append(j - 1)
                    multiplicity.extend(flex.int(count, count))
                    i_means.extend(flex.double(count, i_sum / sum_weight))
                    sum_weights.extend(flex.double(count, sum_weight))
                    if calculate_variances:
                        sum_square_weights.extend(flex.double(count, sum_square_weight))
                    # And reinitialise:
                    prev_index = index
                    count = 1
                    i_sum = weight * rtable["intensity.sum.value"][j]
                    sum_weight = weight
                    if calculate_variances:
                        sum_square_weight = weight * weight
                # Handle the first row:
                else:
                    prev_index = rtable["miller_index.asu"][j]
                    i_sum = weight * rtable["intensity.sum.value"][j]
                    sum_weight = weight
                    if calculate_variances:
                        sum_square_weight = weight * weight
            # Record the aggregated values for the last group:
            if count == 1 and not keep_singles:
                singles.append(rtable.size() - 1)
            multiplicity.extend(flex.int(count, count))
            i_means.extend(flex.double(count, i_sum / sum_weight))
            sum_weights.extend(flex.double(count, sum_weight))
            if calculate_variances:
                sum_square_weights.extend(flex.double(count, sum_square_weight))

            # Discard singletons:
            if not keep_singles:
                singles_del = flex.bool(rtable.size(), True)
                singles_del.set_selected(singles, False)
                multiplicity, weights, sum_weights, i_means = [
                    a.select(singles_del)
                    for a in (multiplicity, weights, sum_weights, i_means)
                ]
                rtable.del_selected(singles)
                if calculate_variances:
                    sum_square_weights = sum_square_weights.select(singles_del)

            # Record the multiplicities in the reflection table.
            rtable["multiplicity"] = multiplicity
            # Record the weighted mean intensities in the reflection table.
            rtable["intensity.mean.value"] = i_means
            # Record the standard errors on the means in the reflection table.
            rtable["intensity.mean.std_error"] = flex.sqrt(1 / sum_weights)

            if calculate_variances:
                # Initialise values:
                prev_index = None
                weighted_sum_square_residual = None
                for j in range(rtable.size()):
                    index = rtable["miller_index.asu"][j]
                    weight = weights[j]
                    residual = rtable["intensity.sum.value"][j] - i_means[j]
                    # Aggregate within a symmetry-equivalent group of reflections:
                    if index == prev_index:
                        count += 1
                        weighted_sum_square_residual += weight * residual * residual
                    # Record the aggregated value for the group:
                    elif prev_index:
                        # The weighted variance is undefined for multiplicity=1,
                        # use the measured variance instead in this case.
                        if count == 1:
                            variances.append(rtable["intensity.sum.variance"][j - 1])
                        else:
                            sum_weight = sum_weights[j - 1]
                            var_weight = 1 / (
                                sum_weight - sum_square_weights[j - 1] / sum_weight
                            )
                            variances.extend(
                                flex.double(
                                    count, weighted_sum_square_residual * var_weight
                                )
                            )
                        # Reinitialise:
                        prev_index = index
                        count = 1
                        weighted_sum_square_residual = weight * residual * residual
                    # Handle the first row:
                    else:
                        prev_index = rtable["miller_index.asu"][j]
                        count = 1
                        weighted_sum_square_residual = weight * residual * residual
                # Record the aggregated values for the last group:
                # The weighted variance is undefined for multiplicity=1,
                # use the measured variance instead in this case.
                if count == 1:
                    variances.append(rtable["intensity.sum.variance"][-1])
                else:
                    sum_weight = sum_weights[-1]
                    var_weight = 1 / (sum_weight - sum_square_weights[-1] / sum_weight)
                    variances.extend(
                        flex.double(count, weighted_sum_square_residual * var_weight)
                    )
                # Record the variances in the reflection table.
                rtable["intensity.mean.variance"] = variances

            self.rtables[key] = rtable

    def _make_z(self, uncertainty="sigma"):
        """
        Generate reflection z-scores.

        Calculate z-scores from reflection intensities, weighted mean
        intensities and a chosen measure of uncertainty in the intensity
        measurement.

        :param uncertainty: Chosen measure of uncertainty.  Options are
        * ``stddev`` — standard deviation, as calculated from the unbiased
        weighted variance aggregated amongst all symmetry-equivalent reflections;
        * ``sigma`` — measurement error for individual reflections.
        :type uncertainty: str
        """
        uncertainty_name = {
            "sigma": "intensity.sum.variance",
            "stddev": "intensity.mean.variance",
        }[uncertainty]

        for key, rtable in self.rtables.items():
            try:
                uncertainty_value = flex.sqrt(rtable[uncertainty_name])
            except KeyError:
                uncertainty_value = flex.sqrt(rtable["intensity.sum.variance"])
                # Use warning(): Logger.warn is a deprecated alias (removed in
                # Python 3.13).
                log.warning(
                    """Weighted variances haven't been calculated,
                be sure to specify calculate_variances=True to use them.
                Defaulting to measured σ values as a measure of uncertainty instead."""
                )
            z = (
                rtable["intensity.sum.value"] - rtable["intensity.mean.value"]
            ) / uncertainty_value
            rtable["intensity.z_score"] = z
            self.rtables[key] = rtable

    def _probplot_data(self):
        """Generate the data for a normal probability plot of z-scores."""
        for key, rtable in self.rtables.items():
            # Scatter the sorted order statistic medians back into the
            # original row order via the sort permutation.
            order = flex.sort_permutation(rtable["intensity.z_score"])
            osm = flex.double(rtable.size(), 0)
            probplot = scipy.stats.probplot(rtable["intensity.z_score"], fit=False)
            osm.set_selected(order, flex.double(probplot[0]))
            rtable["intensity.order_statistic_medians"] = osm
            self.rtables[key] = rtable
|
dials/dials
|
util/intensity_explorer.py
|
Python
|
bsd-3-clause
| 14,961
|
[
"CRYSTAL"
] |
dbe2661802e2b7fea961a7493639af91641d3a0ba9fbbdf2c8badb2ab5f0aff3
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from paddle.utils import gast
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code
from paddle.fluid.dygraph.dygraph_to_static.utils import is_paddle_api
PDB_SET = "pdb.set_trace"
class CallTransformer(gast.NodeTransformer):
    """
    This class transforms function calls into Static Graph Ast.
    """

    def __init__(self, wrapper_root):
        assert isinstance(
            wrapper_root, AstNodeWrapper
        ), "Input non-AstNodeWrapper node for the initialization of CallTransformer."
        self.wrapper_root = wrapper_root
        self.root = wrapper_root.node

    def _no_need_convert_call(self, node):
        """
        Determines whether a function needs to be transformed by `convert_call`.
        It doesn't need to be transformed when a function satisfies the following conditions:
          1. It's a api of paddle
          2. It's a python builtin function not include `len` and `zip`
        """
        assert isinstance(node, gast.Call)
        if is_paddle_api(node):
            return True

        func_str = ast_to_source_code(node.func).strip()
        try:
            from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import is_builtin_len, is_builtin, is_builtin_zip
            # NOTE: `func_str` is source text taken from the user's own
            # program, so eval() here only evaluates code the user already
            # controls; still, any failure means "convert it to be safe".
            # Store results under new names so the imported predicates
            # `is_builtin`/`is_builtin_len`/`is_builtin_zip` are not shadowed.
            builtin = eval("is_builtin({})".format(func_str))
            builtin_len = eval("is_builtin_len({})".format(func_str))
            builtin_zip = eval("is_builtin_zip({})".format(func_str))
            return builtin and not builtin_len and not builtin_zip
        except Exception:
            # Unresolvable callee (e.g. a local variable): let it be converted.
            return False

    def transform(self):
        """Walk the stored AST root, rewriting calls in place."""
        self.visit(self.root)

    def visit_Call(self, node):
        self.generic_visit(node)

        if self._no_need_convert_call(node):
            return node

        func_str = ast_to_source_code(node.func).strip()

        # NOTE(liym27): Don't convert `pdb.set_trace` even if the conversion doesn't work finally, because
        # it is clearer to see where it is called from.
        if PDB_SET in func_str:
            return node

        new_func_str = "paddle.jit.dy2static.convert_call({})".format(func_str)
        new_func_ast = gast.parse(new_func_str).body[0].value
        node.func = new_func_ast

        return node
|
luotao1/Paddle
|
python/paddle/fluid/dygraph/dygraph_to_static/call_transformer.py
|
Python
|
apache-2.0
| 2,985
|
[
"VisIt"
] |
bb9ed08d407470379b7fca75efb809bb0dcbb229af85e1363a6a7446ee15e67b
|
# vclamptest.py ---
#
# Filename: vclamptest.py
# Description:
# Author:
# Maintainer:
# Created: Wed Feb 6 16:25:52 2013 (+0530)
# Version:
# Last-Updated: Sun Jun 25 14:47:22 2017 (-0400)
# By: subha
# Update #: 149
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Set up a voltage clamp experiment with specified series of clamping
# voltage values
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import numpy as np
import sys
sys.path.append('../../../python')
import moose
from moose import utils
# import cells
def vclamptest(axon, vclamp, duration=50e-3, delay=150e-3, solver='ee', vhold=None, mc=None, dc=None, simdt=1e-5, plotdt=0.25e-3):
    """Do a series of voltage clamp experiments on axon.

    parameters:

    axon: Compartment object to be voltage clamped

    vclamp: array of clamping voltage values.

    duration: duration of each clamp

    delay: delay between successive application of clamping voltages

    solver: moose numerical method to use ('ee' by default; passed to
    utils.resetSim as `simmethod`)

    vhold: holding voltage, If None, the Em of the
    axon is used.

    mc: model container, the vclamp object will be created inside
    mc/electronics. If None, we use axon.parent.parent

    dc: data container, the data recording tables will be created
    inside it. If None, we use axon.parent.parent

    simdt: simulation timestep

    plotdt: recording timestep for the data tables

    Returns a dict with keys 'Vm', 'commandVoltage', 'inject', 'ts'
    (time points) and 'injectArrays' (one current trace per clamp level).
    """
    if vhold is None:
        vhold = axon.C.Em
    if mc is None:
        mc = axon.C.parent.parent
    if dc is None:
        dc = axon.C.parent.parent
    # Build the clamp circuit: pulse generator drives the command input of
    # the voltage clamp, which senses and injects into the compartment.
    electronics = moose.Neutral('%s/electronics' % (mc.path))
    command = moose.PulseGen('%s/command_source' % (electronics.path))
    clamp = moose.VClamp('%s/vclamp' % (electronics.path))
    moose.connect(command, 'output', clamp, 'commandIn')
    moose.connect(axon.C, 'VmOut', clamp, 'sensedIn')
    moose.connect(clamp, 'currentOut', axon.C, 'injectMsg')
    # Program one pulse (delay + duration) per clamping voltage.
    simtime = 0
    command.count = len(vclamp)
    command.baseLevel = vhold
    for ii, clamping_voltage in enumerate(vclamp):
        simtime += delay + duration
        command.delay[ii] = delay
        command.width[ii] = duration
        command.level[ii] = clamping_voltage
    # Recording tables: injected current, command voltage and membrane Vm.
    injected = moose.Table('%s/Iinject' % (dc.path))
    moose.connect(injected, 'requestOut', clamp, 'getCurrent')
    voltage = moose.Table('%s/Vcommand' % (dc.path))
    moose.connect(voltage, 'requestOut', command, 'getOutputValue')
    vm = moose.Table('%s/Vm' % (dc.path))
    moose.connect(vm, 'requestOut', axon.C, 'getVm')
    utils.resetSim([mc.path, dc.path], simdt, plotdt, simmethod=solver)
    moose.start(simtime)
    ivec = np.asarray(injected.vector)
    vvec = np.asarray(voltage.vector)
    vmvec = np.asarray(vm.vector)
    ts = np.linspace(0, simtime, len(vvec))
    # Pulse edges in the command trace delimit each clamp episode.
    sidx = np.nonzero(np.diff(vvec) > 0)[0]
    eidx = np.nonzero(np.diff(vvec) < 0)[0]
    iarrays = []
    for ii in range(len(vclamp)):
        iarrays.append(ivec[sidx[ii]: eidx[ii]].copy())
    return {
        "Vm": vmvec,
        "commandVoltage": vvec,
        "inject": ivec,
        "ts": ts,
        "injectArrays": iarrays}
from matplotlib import pyplot as plt
sys.path.append('../../squid')
from squid import SquidAxon
def test():
    """Build a squid-axon model, run a series of voltage clamps and plot.

    Creates model/data containers, runs `vclamptest` with five clamp
    levels and shows Vm, injected current and per-level current traces.
    """
    mc = moose.Neutral('model')
    dc = moose.Neutral('data')
    nrn = moose.Neuron('%s/nrn' % (mc.path))
    x = SquidAxon('%s/squid' % (nrn.path))
    clampv = [10.0, 20.0, 30.0, 40.0, 50.0]
    data = vclamptest(x, clampv, duration=20.0, delay=100.0, vhold=0.0, mc=mc, dc=dc, simdt=1e-2, plotdt=1e-2, solver='hsolve')
    plt.subplot(311)
    plt.title('Membrane potential throughout experiment')
    plt.plot(data['ts'], data['Vm'], label='Vm')
    plt.legend()
    plt.subplot(312)
    plt.title('Injection current throughout experiment')
    plt.plot(data['ts'], data['inject'], label='Inject')
    plt.legend()
    plt.subplot(313)
    # Fixed typo in the displayed title ('volatge' -> 'voltage').
    plt.title('Injection currents for different clamp voltage values')
    for ii, inject in enumerate(data['injectArrays']):
        plt.plot(inject, label='V = %g' % (clampv[ii]))
    plt.legend()
    plt.show()
# Run the demonstration when executed as a script.
if __name__ == '__main__':
    test()
#
# vclamptest.py ends here
|
BhallaLab/moose-examples
|
traub_2005/py/vclamptest.py
|
Python
|
gpl-2.0
| 4,772
|
[
"MOOSE",
"NEURON"
] |
38a29fdd1ef4631789ee009a54d9ded0749d8573903ca80f411f4faf29f56200
|
from random import random
import math
def sigmoid(x):
    """Logistic sigmoid activation: maps any real x into (0, 1)."""
    return 1.0 / (1.0 + math.exp(-x))
def th(x):
    """Hyperbolic tangent activation.

    Delegates to math.tanh, which is numerically stable: the previous
    hand-rolled (e^x - e^-x)/(e^x + e^-x) form raised OverflowError for
    |x| greater than ~710 because math.exp overflows.
    """
    return math.tanh(x)
class _AbstractNeuron():
def __init__(self):
self.out = 0
self.w = []
self.w0 = 0
self.dw = []
self.dw0 = 0
def calculate(self, x):
return self.out
def commit_teach(self):
pass
class Neuron(_AbstractNeuron):
    """Fully-connected neuron squashing its net input through th()."""

    def __init__(self, synapse_count):
        _AbstractNeuron.__init__(self)
        # Small random weights in [-0.15, 0.15).  Synapse weights are drawn
        # first, then the bias, keeping the RNG consumption order unchanged.
        self.w = [random() * 0.3 - 0.15 for _ in range(synapse_count)]
        self.w0 = random() * 0.3 - 0.15
        self.dw = [0] * synapse_count

    def add_synapse(self, count):
        """Grow the neuron by `count` extra randomly-initialised weights."""
        extra = [random() * 0.3 - 0.15 for _ in range(count)]
        self.w = self.w + extra
        self.dw = self.dw + [0] * count

    def calculate(self, x):
        """Weighted input sum minus bias, squashed through th()."""
        net = sum(x[k] * self.w[k] for k in range(len(x)))
        self.out = th(net - self.w0)
        return self.out

    def commit_teach(self):
        """Fold the accumulated deltas into the weights."""
        for k in range(len(self.w)):
            self.w[k] += self.dw[k]
        self.w0 += self.dw0
class InputNeuron(_AbstractNeuron):
    """Pass-through neuron: its output equals the raw input value."""

    def __init__(self):
        _AbstractNeuron.__init__(self)

    def calculate(self, x):
        """Store the input as the output and return it unchanged."""
        self.out = x
        return x
class RandomNeuron(_AbstractNeuron):
    """Neuron that jitters its input by a state-dependent random amount."""

    def __init__(self, random_value):
        _AbstractNeuron.__init__(self)
        # Scale of the random perturbation added to the input.
        self.random_value = random_value

    def calculate(self, x):
        # The jitter is centred using the previous output: random() is
        # offset by (1 - out) / 2 and scaled by random_value.
        noise = (random() - (1 - self.out) / 2) * self.random_value
        self.out = x + noise
        return self.out
class BiasNeuron(_AbstractNeuron):
    """Constant neuron whose output is always 1 (bias unit)."""

    def __init__(self):
        _AbstractNeuron.__init__(self)
        self.out = 1
|
zshimanchik/unconditioned-reflexes
|
NeuralNetwork/Neuron.py
|
Python
|
mit
| 1,726
|
[
"NEURON"
] |
ce2aa0f23bf362950e70408883c9dfe64bb9285a0040378e3af4216a96a69f4f
|
import numpy as np
import numpy.linalg as nl
import numpy.random as nrnd
def log_sum_exp(X):
    r"""
    Computes log sum_i exp(X_i) in a numerically stable way.

    Useful if you want to solve log \int f(x)p(x) dx
    where you have samples from p(x) and can compute log f(x)

    Stability fix: the *maximum* is factored out (the standard
    log-sum-exp shift), so every exponentiated term is <= 1 and cannot
    overflow.  The previous implementation factored out the minimum,
    which made np.exp(X - X0) overflow to inf for widely spread inputs
    (spread > ~709 in natural-log units).
    """
    X = np.asarray(X)
    # Shift by the maximum: exp(X - X0) is in (0, 1], no overflow possible.
    X0 = np.max(X)
    return X0 + np.log(np.sum(np.exp(X - X0)))
def log_mean_exp(X):
    """
    Computes log 1/n sum_i exp(X_i).

    Useful if you want to solve log \int f(x)p(x) dx
    where you have samples from p(x) and can compute log f(x)
    """
    n = len(X)
    return log_sum_exp(X) - np.log(n)
def avg_prob_of_log_probs(X):
    """
    Given a set of log-probabilities, this computes log-mean-exp of them.
    Careful checking is done to prevent buffer overflows
    Similar to calling (but overflow-safe): log_mean_exp(log_prob)

    Returns the average probability (exp of log-mean-exp) as a float.
    """
    # extract inf inds (no need to delete X0 from X here)
    # NOTE(review): shifting by the *minimum* means exp(X - X0) overflows
    # exactly for the entries farthest above the minimum, i.e. the largest
    # log-probabilities get dropped below — confirm this is intentional.
    X0 = X.min()
    inf_inds = np.isinf(np.exp(X - X0))
    # remove these numbers
    X_without_inf = X[~inf_inds]
    # return exp-log-mean-exp on shortened array
    avg_prob_without_inf = np.exp(log_mean_exp(X_without_inf))
    # re-normalise by the full length, which is equivalent to adding a zero probability observation
    renormaliser = float(len(X_without_inf)) / len(X)
    avg_prob_without_inf = avg_prob_without_inf * renormaliser
    return avg_prob_without_inf
def qmult(b):
    """
    QMULT Pre-multiply by random orthogonal matrix.
    QMULT(A) is Q*A where Q is a random real orthogonal matrix from
    the Haar distribution, of dimension the number of rows in A.
    Special case: if A is a scalar then QMULT(A) is the same as
    QMULT(EYE(A)).
    Called by RANDSVD.
    Reference:
    G.W. Stewart, The efficient generation of random
    orthogonal matrices with an application to condition estimators,
    SIAM J. Numer. Anal., 17 (1980), 403-409.
    """
    # Duck-typed dispatch: a matrix provides .shape, a scalar does not.
    try:
        n, _ = b.shape
        a = b.copy()
    except AttributeError:
        n = b
        a = np.eye(n)
    # d accumulates the +/-1 sign corrections applied at the end.
    d = np.zeros(n)
    for k in range(n - 2, -1, -1):
        # Generate random Householder transformation.
        x = nrnd.randn(n - k)
        s = nl.norm(x)
        # Modification to make sign(0) == 1
        sgn = np.sign(x[0]) + float(x[0] == 0)
        s = sgn * s
        d[k] = -sgn
        x[0] = x[0] + s
        beta = s * x[0]
        # Apply the transformation to a
        y = np.dot(x, a[k:n, :])
        a[k:n, :] = a[k:n, :] - np.outer(x, (y / beta))
    # Tidy up signs.
    for i in range(n - 1):
        a[i, :] = d[i] * a[i, :]
    # Now randomly change the sign (Gaussian dist)
    a[n - 1, :] = a[n - 1, :] * np.sign(nrnd.randn())
    return a
|
karlnapf/kernel_exp_family
|
kernel_exp_family/tools/numerics.py
|
Python
|
bsd-3-clause
| 2,797
|
[
"Gaussian"
] |
6a0c2bc0e5c958c2526e0d07b7d3051abc747c7cf610fadccb03628f79ceef03
|
import os
import time
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit as u
from fah_parameters import *
def write_file(filename, contents):
    """Write `contents` to `filename`, truncating any existing file."""
    with open(filename, 'w') as handle:
        handle.write(contents)
# Package an NVT OpenMM system for Folding@home: serialize the system,
# the integrator, and `nclones` starting states with resampled velocities.
rundir = "./RUNS_NVT/RUN0/"
nclones = 500
system_filename = os.path.join(rundir, "system.xml")
integrator_filename = os.path.join(rundir, "integrator.xml")
pdb_filename = "./equil_nvt/equil_nvt.pdb"
pdb = app.PDBFile(pdb_filename)
topology = pdb.topology
positions = pdb.positions
# Force field / water model selection (xml files shipped with OpenMM).
ff_name = "amber99sbildn"
water_name = 'tip3p'
which_forcefield = "%s.xml" % ff_name
which_water = '%s.xml' % water_name
ff = app.ForceField(which_forcefield, which_water)
# cutoff, temperature, friction, timestep come from fah_parameters (star import).
system = ff.createSystem(topology, nonbondedMethod=app.PME, nonbondedCutoff=cutoff, constraints=app.HBonds)
# Disable the long-range dispersion correction on every force that has one.
for force in system.getForces():
    try:
        force.setUseDispersionCorrection(False)
    except AttributeError:
        pass
integrator = mm.LangevinIntegrator(temperature, friction, timestep)
simulation = app.Simulation(topology, system, integrator)
simulation.context.setPositions(positions)
simulation.context.setVelocitiesToTemperature(temperature)
write_file(system_filename, mm.XmlSerializer.serialize(system))
write_file(integrator_filename, mm.XmlSerializer.serialize(integrator))
# One serialized State per clone, each with freshly drawn velocities.
for clone_index in range(nclones):
    simulation.context.setVelocitiesToTemperature(temperature)
    state = simulation.context.getState(getPositions=True, getVelocities=True, getForces=True, getEnergy=True, getParameters=True, enforcePeriodicBox=True)
    state_filename = os.path.join(rundir, 'state%d.xml' % clone_index)
    serialized = mm.XmlSerializer.serialize(state)
    write_file(state_filename, serialized)
|
kyleabeauchamp/fah-projects
|
code/packaging_nvt.py
|
Python
|
gpl-2.0
| 1,730
|
[
"OpenMM"
] |
f731ac7f5d918ba78c4cc7620f9b4b0e06cc94d5f36823bde2ed619c43c7a7c0
|
#!/usr/bin/env python
import commands, sys
# Get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
# Append path that contains scripts, to be able to load modules
sys.path.append(path_sct + '/scripts')
from msct_parser import Parser
import sct_utils as sct
# DEFAULT PARAMETERS
class Param:
    """Default parameters for the preprocessing pipeline."""

    def __init__(self):
        self.debug = 0
        # Verbosity level forwarded to the sct_* commands.
        self.verbose = 1
        # Delete intermediate files (1) or keep them for inspection (0).
        self.remove_temp_files = 1
        # Noise model assumed by the denoiser: 'Rician' or 'Gaussian'.
        self.type_noise = 'Rician'
def main(input_anatomy_file, list_files, param, remove_temp_files = 1, verbose = 0) :
path, file, ext = sct.extract_fname(input_anatomy_file)
# Image denoising
print '\nDenoising image ' + input_anatomy_file +'...'
sct.run('sct_denoising_onlm.py -i '+ input_anatomy_file + ' -p ' + type_noise + ' -r ' + str(remove_temp_files) + ' -v ' + str(verbose))
# Extract and fit centerline
list_name_files = list_files[0]
for i in range(1, len(list_files)):
list_name_files = list_name_files + ',' + list_files[i]
print '\nExtracting and fitting centerline...'
sct.run('sct_get_centerline_from_labels -i '+ list_name_files + ' -r ' + str(remove_temp_files) + ' -v ' + str(verbose))
# Straighten the image using the fitted centerline
print '\nStraightening the image ' + input_anatomy_file + ' using the fitted centerline ' + 'generated_centerline.nii.gz'+ ' ...'
sct.run('sct_straighten_spinalcord -i ' + input_anatomy_file + ' -c ' + 'generated_centerline.nii.gz' + ' -r ' + str(remove_temp_files) + ' -v ' + str(verbose))
output_straighten_name = file + '_straight' +ext
# Aplly transfo to the centerline
print '\nApplying transformation to the centerline...'
sct.run('sct_apply_transfo -i ' + 'generated_centerline.nii.gz' + ' -d ' + output_straighten_name + ' -w ' + 'warp_curve2straight.nii.gz' + ' -x ' + 'linear' + ' -v ' + str(verbose))
# Normalize intensity of the image using the straightened centerline
print '\nNormalizing intensity of the straightened image...'
sct.run('sct_normalize.py -i ' + output_straighten_name + ' -c generated_centerline_reg.nii.gz' + ' -v ' + str(verbose))
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
    # initialize parameters
    # Initialize the parser
    parser = Parser(__file__)
    parser.usage.set_description('Preprocessing of data: denoise, extract and fit the centerline, straighten the image using the fitted centerline and finally normalize the intensity.')
    parser.add_option(name="-i",
                      type_value='file',
                      description="Anatomic NIFTI image file.",
                      mandatory=True)
    parser.add_option(name="-l",
                      type_value=[[','],'file'],
                      description="List containing segmentation NIFTI file and label NIFTI files. They must be 3D. Names must be separated by commas without spaces. The list must at least contain a segmentation file.",
                      mandatory=True)
    # parser.add_option(name="-o",
    #                   type_value="file_output",
    #                   description="Name of the output NIFTI image with the centerline and of the output text file with the coordinates (z, x, y) (but text file will have '.txt' extension).",
    #                   mandatory=False,
    #                   default_value='generated_centerline.nii.gz')
    parser.add_option(name="-p",
                      type_value="multiple_choice",
                      description="Type of supposed noise: Rician or Gaussian. Default is Rician.",
                      mandatory=False,
                      example=["Rician","Gaussian"],
                      default_value="Rician")
    parser.add_option(name="-r",
                      type_value="multiple_choice",
                      description="Remove temporary files. Specify 0 to get access to temporary files.",
                      mandatory=False,
                      example=['0','1'],
                      default_value="1")
    parser.add_option(name="-v",
                      type_value="multiple_choice",
                      description="Verbose. 0: nothing. 1: basic. 2: extended.",
                      mandatory=False,
                      default_value='0',
                      example=['0', '1', '2'])
    arguments = parser.parse(sys.argv[1:])
    # Pull the parsed CLI values; -r, -v and -p always have defaults.
    remove_temp_files = int(arguments["-r"])
    verbose = int(arguments["-v"])
    type_noise = arguments["-p"]
    if "-i" in arguments:
        input_anatomy_file = arguments["-i"]
    if "-l" in arguments:
        list_files = arguments["-l"]
    # if "-o" in arguments:
    #     output_file_name = arguments["-o"]
    # else: output_file_name = None
    # Copy CLI values onto the Param container before running the pipeline.
    param = Param()
    param.verbose = verbose
    param.remove_temp_files = remove_temp_files
    param.type_noise = type_noise
    main(input_anatomy_file, list_files, param, remove_temp_files, verbose)
|
3324fr/spinalcordtoolbox
|
dev/tamag/old/sct_function_preprocessing.py
|
Python
|
mit
| 5,437
|
[
"Gaussian"
] |
e2c109fe87c9c1c350d7dca0304a547151b34be04db55f11f2312e025fedbcd8
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pyqt4topyqt5.py
# Nov. 12 2013
# Author: Vincent Vande Vyvre <vincent.vandevyvre@swing.be>
# Copyright: 2013 Vincent Vande Vyvre
# Licence: LGPL3
import os
import glob
import re
import shutil
import argparse
import sys
import tokenize
import subprocess
import stat
from datetime import datetime
from codecs import BOM_UTF8, lookup, open as open_
PY_VERS = sys.version_info[0]
if PY_VERS < 3:
from StringIO import StringIO
range_ = xrange
else:
from io import StringIO
range_ = range
from .qtclass import MODULES, CLASSES, DISCARDED, QAPP_STATIC_METHODS, QVARIANT_OBSOLETE_METHODS
L_SEP = os.linesep
PYEXT = (os.extsep + "py", os.extsep + "pxi")
PYSHEBANG = ("#!/usr/bin/env python", "#!/usr/bin/python")
MOD_RE = {'QtGui': re.compile(r'(?<=QtGui\.)(.*?)(?=[.\(\),\]:]|\Z)', re.DOTALL),
'QtWebKit': re.compile(r'(?<=QtWebKit\.)(.*?)(?=[.\(\),\]:]|\Z)', re.DOTALL)}
SIG_RE = {'fun_re': re.compile(r'(?<=\()(.*)(?=\))', re.DOTALL),
'sig_re': re.compile(r'''(?<=SIGNAL\(["' ])(.*?)(?=["',])''', re.DOTALL),
'slot_re': re.compile(r'''(?<=SLOT\(["'])(.*?)(?=["'])''', re.DOTALL),
'pysig_re': re.compile(r'''(?<=["'])(.*?)(?=["'])''', re.DOTALL)}
DOT_RE = re.compile(r'(?<=\()(.*?)(?=\.NoDotAndDotDot)')
WHEEL_RE = re.compile(r'(?<=def wheelEvent\(self,)(.*?)(?=\):)')
LAYOUT_RE = re.compile(r'(.*?)(\=)(.*?)(?=Layout\()')
DSK_RE = re.compile(r'(.*?)(\=)(.*?)(?=QDesktopServices\()')
DATE_RE = re.compile(r'(.*?)(\=)(.*?)(?=QDate\()')
CLS_RE = re.compile(r'(?<=class )(.*?)(?=[\(:])')
# Utils
def diff_parenthesis(line):
    """Return the count of '(' minus the count of ')' in `line`."""
    return line.count('(') - line.count(')')
class PyQt4ToPyQt5(object):
    def __init__(self, source, dest, log, nopyqt5):
        """Prepare a converter for one file.

        Args:
        source -- path of the PyQt4 file to convert
        dest -- path where the converted file is written
        log -- log destination used by print_()
        nopyqt5 -- when true, only signal/slot syntax is modernised
        """
        self.log = log
        self.source = source
        self.dest = dest
        # Default indentation unit; refined from the file itself in setup().
        self.indent = ' '
        self.tools = Tools()
        # Guards so the QtWidgets star-import is only inserted once.
        self._has_qtwidget_import = False
        self._added_pyqtSignal = False
        # Full PyQt5 conversion unless the caller asked for signals only.
        self._pyqt5 = not nopyqt5
    def setup(self):
        """Drive the whole conversion of self.source, fix by fix.

        Reads the file, detects which PyQt4 constructs are present, then
        applies the signal/slot, module-rename and per-class fixers in a
        carefully ordered sequence before saving the result.
        """
        self.print_('Processing file: `%s`' % self.source)
        # Tracks which PyQt5 modules the rewritten file ends up needing.
        self.modified = {'QtGui': False, 'QtWidgets': False,
                         'QtWebKit': False, 'QtWebKitWidgets': False,
                         'QtMultimedia': False, 'QSound': False,
                         'QtCore': False, 'QtPrintSupport': False,
                         'QStandardPaths': False}
        src = self.tools.get_code_lines(self.source)
        if src is None:
            self.print_(' Error: Unable to read the file: %s\n Reason: %s\n'
                        % (self.source, self.tools.last_error))
            return
        try:
            self.indent = self.get_token_indent(''.join(src))[0]
        except IndexError:
            # Never seen a PyQt4 script without indentation, but ...
            self.indent = ' '
        # src is the list of logical lines code, NOT physical lines
        qt4, sig, gui, web = self.get_import_lines(src)
        if not any([qt4, sig, gui, web]):
            self.print_(' No changes needed.\n')
            return
        # call before updating signals and slots
        if self._pyqt5:
            self.remove_fromUtf8(src)
        # call before change_module_name
        if sig:
            self.fix_emit(src)
            self.fix_connect(src)
            self.fix_disconnect(src)
            self.fix_signal(src)
            self.fix_slot(src)
        if gui and self._pyqt5:
            src = self.change_module_name(src, 'QtGui', 'QtCore')
            src = self.change_module_name(src, 'QtGui', 'QtWidgets')
            src = self.change_module_name(src, 'QtGui', 'QtPrintSupport')
        if web and self._pyqt5:
            src = self.change_module_name(src, 'QtWebKit', 'QtWebKitWidgets')
        # call after the signals and slots have been fixed
        src = self.change_import_lines(src)
        if self._pyqt5:
            # Class-specific API migrations; each mutates src in place.
            self.fix_qfiledialog(src)
            self.fix_qdir(src)
            self.fix_qwidget(src)
            self.fix_qtscript(src)
            self.fix_qtxml(src)
            self.fix_qtdeclarative(src)
            self.fix_qgraphicsitemanimation(src)
            self.fix_qtopengl(src)
        self.fix_translations(src)
        self.fix_wheelevent(src)
        self.fix_layoutmargin(src)
        self.fix_qdesktopservices(src)
        self.fix_qdate(src)
        self.fix_qgraphicsitem(src)
        self.fix_qheader(src)
        self.fix_qinputdialog(src)
        self.fix_qchar(src)
        self.fix_qstring(src)
        self.fix_qglobal(src)
        self.fix_qvariant(src)
        self.replace_classnames(src)
        self.replace_qApp(src)
        self.finish_process(src)
def finish_process(self, src):
src, fixs = self.clean_file(src)
self.save_changes(src)
if fixs:
if len(fixs) == 1:
txt = " FIXME added:\n%s" % fixs[0][:-1]
else:
txt = " FIXMEs added:\n" + ''.join(fixs)[:-1]
self.print_(txt)
self.print_(' File updated.\n')
def get_import_lines(self, lines):
"""Check if changes are needed.
Args:
lines -- source code
Returns:
(True, True, True) if there's PyQt4 or/and QtGui or/and QtWebkit imports
"""
qt4 = sig = gui = web = False
for line in lines:
if self.is_code_line(line) and ('SIGNAL(' in line or 'SLOT(' in line or 'emit(' in line):
sig = True
if line.lstrip().startswith(('import ', 'from ')) and 'PyQt4' in line:
qt4 = True
if '.Qt' in line:
gui = True
web = True
if 'QtGui' in line:
gui = True
if 'QtWebKit' in line:
web = True
if all([sig, gui, web]):
break
return qt4, sig, gui, web
    def change_module_name(self, lines, old_mod, new_mod):
        """Change the module name for the class wich are moved to a new module.

        Args:
        lines -- source code
        old_mod -- the old name of the module
        new_mod -- the name of the module where the class has been moved

        Returns:
        a new list of lines with module prefixes rewritten.
        """
        fixme = "# FIXME$ Ambiguous syntax, can't refactor it\n"
        classes = CLASSES[new_mod]
        news = []
        count = 0
        def get_module_name(widget):
            # Map a widget/class name to its PyQt5 home module, recording
            # which modules were actually used in self.modified.
            if widget == 'QSound':
                self.modified['QtMultimedia'] = True
                self.modified['QSound'] = True
                return 'QtMultimedia'
            if widget == 'QStringListModel':
                self.modified['QtCore'] = True
                return 'QtCore'
            if widget in classes:
                self.modified[new_mod] = True
                return new_mod
            self.modified[old_mod] = True
            return old_mod
        while count < len(lines):
            line = lines[count]
            if not self.is_code_line(line) or ' import ' in line:
                news.append(line)
                count += 1
                continue
            if old_mod in line:
                names = MOD_RE[old_mod].findall(line)
                if not names:
                    news.append(line)
                    count += 1
                    continue
                # Rebuild the line: alternate the split parts with the
                # (possibly renamed) module prefix for each matched class.
                new = []
                parts = line.split(old_mod)
                if line.startswith(old_mod):
                    name = names.pop(0).strip()
                    new.append(get_module_name(name))
                for part in parts[:-1]:
                    if not part:
                        continue
                    new.append(part)
                    try:
                        name = names.pop(0).strip()
                        new.append(get_module_name(name))
                    except IndexError:
                        # Fewer class names than module mentions: flag it.
                        # NOTE(review): this `continue` resumes the *for*
                        # loop, not the while — confirm the intended flow.
                        indent = self.get_token_indent(line)
                        news.append("%s%s" % (indent, fixme))
                        news.append(line)
                        count += 2
                        continue
                new.append(parts[-1])
                ln = ''.join(new)
                news.append(ln)
                count += 1
            else:
                news.append(lines[count])
                count += 1
        return news
    def fix_qfiledialog(self, lines):
        """Change the name of the class QFileDialog.

        PyQt5 merged the *AndFilter variants into the plain getters, which
        now return a (name, filter) tuple instead of a plain string.

        Args:
        lines -- source code
        """
        olds = ('.getOpenFileNamesAndFilter', '.getOpenFileNameAndFilter', '.getSaveFileNameAndFilter')
        news = ('.getOpenFileNames', '.getOpenFileName', '.getSaveFileName')
        count = 0
        while count < len(lines):
            if not self.is_code_line(lines[count]):
                pass
            elif 'AndFilter' in lines[count]:
                # Old *AndFilter call: the plain name already returns the tuple.
                for old in olds:
                    if old in lines[count]:
                        lines[count] = lines[count].replace('AndFilter', '')
                        break
            elif 'FileName' in lines[count]:
                for new in news:
                    if new in lines[count]:
                        line = lines[count].rstrip()
                        # Two targets on the left-hand side already unpack
                        # the tuple, so nothing to do for this line.
                        if self.count_ref(line.split('=')[0]) == 2:
                            continue
                        # Since the old method returns a str and the new one
                        # returns a tuple, we insert an indice [0] into the
                        # final parenthesis
                        _, end = self.find_closing_parenthesis(line, new)
                        lines[count] = ''.join([line[:end+1], '[0]', line[end+1:], '\n'])
                        break
            count += 1
def fix_qdir(self, lines):
"""Replace QDir filter NoDotAndDotDot by filters NoDot and NoDotDot
and convertSeparators() by toNativeSeparators()
Args:
lines -- source code
"""
for idx, line in enumerate(lines):
if self.is_code_line(line):
if '.NoDotAndDotDot' in line:
inst = DOT_RE.search(line.lstrip())
if inst is not None:
name = inst.group(0).split('|')[-1].lstrip()
rep = '.NoDot | %s.NoDotDot' % name
lines[idx] = line.replace('.NoDotAndDotDot', rep)
if '.convertSeparators(' in line:
lines[idx] = line.replace('convertSeparators', 'toNativeSeparators')
    def fix_qwidget(self, lines):
        """
        Checks if some QWidget classes are used without importing the QWidget module
        This function is SLOW
        """
        def import_qwidgets():
            # Insert the QtWidgets star-import right after the first
            # non-__future__ import line, and remember that we did so.
            i = 0
            self._has_qtwidget_import = True
            while i < len(lines):
                l = lines[i]
                if self.is_code_line(l) and l.lstrip().startswith(('import ', 'from ')) and not '__future__' in l:
                    indent = self.get_token_indent(l)
                    lines.insert(i+1, indent + 'from PyQt5.QtWidgets import *\n')
                    return
                i += 1
        if self._has_qtwidget_import:
            return
        count = 0
        while count < len(lines):
            line = lines[count]
            if self.is_code_line(line):
                # Substring scan over every known QtWidgets class name.
                for w in CLASSES['QtWidgets']:
                    if w in line:
                        import_qwidgets()
                        return
            count += 1
    def fix_qtscript(self, lines):
        """Insert a FIXME for the class QtScript and QtScriptTools.

        Args:
        lines -- source code
        """
        fixme = '# FIXME$ QtScript and QtScriptTools are no longer supported.\n'
        count = 0
        while count < len(lines):
            line = lines[count]
            if self.is_code_line(line):
                if 'QtScript' in line or 'QScript' in line:
                    indent = self.get_token_indent(line)
                    lines.insert(count, '%s%s' %(indent, fixme))
                    # Skip over the line we just inserted.
                    count += 1
            count += 1
    def fix_qtxml(self, lines):
        """Insert a FIXME for the classes QXMLStreamReader and QXMLStreamWriter.

        Args:
        lines -- source code
        """
        fixme = '# FIXME$ QtXml is no longer supported.\n'
        count = 0
        while count < len(lines):
            line = lines[count]
            if self.is_code_line(line):
                if 'QtXml' in line:
                    indent = self.get_token_indent(line)
                    lines.insert(count, '%s%s' %(indent, fixme))
                    # Skip over the line we just inserted.
                    count += 1
            count += 1
    def fix_qtdeclarative(self, lines):
        """Insert a FIXME for the class QtDeclarative.

        Args:
        lines -- source code
        """
        fixme = '# FIXME$ QtDeclarative module is no longer supported.\n'
        names = ['QtDeclarative', 'QDeclarative', 'QPyDeclarative']
        count = 0
        while count < len(lines):
            line = lines[count]
            if self.is_code_line(line):
                for name in names:
                    if name in line:
                        indent = self.get_token_indent(line)
                        lines.insert(count, '%s%s' %(indent, fixme))
                        # Skip the inserted FIXME; one marker per line is enough.
                        count += 1
                        break
            count += 1
    def fix_qgraphicsitemanimation(self, lines):
        """Insert a FIXME for the class QGraphicsItemAnimation

        Args:
        lines -- source code
        """
        fixme = '# FIXME$ QGraphicsItemAnimation class is no longer supported.\n'
        count = 0
        while count < len(lines):
            line = lines[count]
            if self.is_code_line(line):
                if 'QGraphicsItemAnimation' in line:
                    indent = self.get_token_indent(line)
                    lines.insert(count, '%s%s' %(indent, fixme))
                    # Skip over the line we just inserted.
                    count += 1
            count += 1
    def fix_qtopengl(self, lines):
        """Insert a FIXME for the module QtOpenGl

        Args:
        lines -- source code
        """
        fixme = '# FIXME$ Only QGLContext, QGLFormat and QGLWidget are supported.\n'
        # Classes dropped from QtOpenGL in PyQt5 (per the DISCARDED table).
        classes = DISCARDED['QtOpenGl']
        count = 0
        while count < len(lines):
            line = lines[count]
            if self.is_code_line(line):
                if 'QGL' in line:
                    for cls in classes:
                        if cls in line:
                            indent = self.get_token_indent(line)
                            lines.insert(count, '%s%s' %(indent, fixme))
                            # Skip the inserted FIXME; one marker per line.
                            count += 1
                            break
            count += 1
    def split_function(self, function):
        """Split a function-argument string on top-level commas.

        Commas inside parentheses or string literals do not split; lambda
        expressions are re-joined up to their ':' so their own commas
        survive.  Returns the list of stripped argument strings ([] for an
        empty/blank input).

        NOTE(review): the inner string scan indexes function[i] without a
        bounds check — an unterminated quote or parenthesis in the input
        would raise IndexError; confirm inputs are always well formed.
        """
        slices = ['']
        current = 0
        i = 0
        while i < len(function):
            if function[i] == ',':
                # Top-level comma: start a new slice.
                slices.append('')
                current += 1
                i += 1
                continue
            slices[current] += function[i]
            if function[i] == '(':
                # Consume the whole parenthesised group into this slice.
                inside = 1
                while inside != 0:
                    i += 1
                    # Skip over strings (which may contain parentheses).
                    # TODO: handle triple-quoted strings
                    if function[i] == '"' or function[i] == "'":
                        str_delimiter = function[i]
                        while True:
                            slices[current] += function[i]
                            i += 1
                            if function[i] == str_delimiter:
                                break
                    if function[i] == '(':
                        inside += 1
                    elif function[i] == ')':
                        inside -= 1
                    slices[current] += function[i]
            i += 1
        result = []
        i = 0
        while i < len(slices):
            if 'lambda ' in slices[i]:
                # Re-join the comma-separated pieces of a lambda until the
                # ':' that ends its parameter list.
                lambda_f = ''
                while i < len(slices):
                    if lambda_f == '':
                        lambda_f += slices[i]
                    else:
                        lambda_f += ',' + slices[i]
                    if ':' in slices[i]:
                        break
                    i += 1
                result.append(lambda_f)
            else:
                result.append(slices[i])
            i += 1
        if len(result) == 1 and result[0].strip() == '':
            return []
        return [s.strip() for s in result]
    def remove_signal_slot(self, el):
        """Removes old-style signal/slot declarations which use the SIGNAL/SLOT nomenclature.

        Args:
        el -- string containing a signal/slot declaration

        Returns:
        list -- signal/slot name followed by signal/slot arguments
        """
        if "SIGNAL(" in el or "SLOT(" in el:
            # Note: This assumes that SIGNAL/SLOT is the first function declared in el.
            content = SIG_RE['fun_re'].search(el).groups()[0]
            content = content.strip()
            if not content.startswith(('"', "'")):
                # Unusual signal/slot declaration--not of the form 'name(args)'
                # Return the entire declaration string as the signal/slot name.
                print('WARNING: Invalid signal/slot declaration syntax:'+content)
                return [content]
            content = content.strip('\'"')
            slices = content.split('(')
            slices[0] = slices[0].lstrip()
            if len(slices) == 1:
                # No argument list, e.g. SIGNAL("clicked").
                return slices
            # Name plus the C++ argument list split into individual types.
            return [slices[0]] + self.split_function(self.clean_signal_args(slices[1].replace(')', '')))
        # Signal/slot not declared with SIGNAL/SLOT nomenclature.
        # Return the entire string as the signal/slot name.
        return [el]
    def create_signal(self, lines, currentIdx, signal):
        """Adds the declaration of a new pyqtSignal class member.

        Args:
        lines -- the list of source code lines
        currentIdx -- index into lines list where use of signal was detected

        Returns:
        int -- number of additional lines inserted into lines list
        """
        module = signal.split('SIGNAL(')[0]
        signal = self.remove_signal_slot(signal)
        name = signal[0]
        line = lines[currentIdx]
        # Walk backwards to the enclosing `class` statement.
        while not self.is_code_line(line) or not 'class ' in line:
            currentIdx -= 1
            line = lines[currentIdx]
        currentIdx += 1
        line = lines[currentIdx]
        # Scan the existing pyqtSignal declarations at the top of the class;
        # bail out if a declaration mentioning this name already exists.
        while True:
            if self.is_code_line(line) and name in line:
                return 0
            if self.is_code_line(line) and not 'pyqtSignal' in line:
                break
            currentIdx += 1
            line = lines[currentIdx]
        indent = self.get_token_indent(line)
        if lines[currentIdx-1] == "\n":
            currentIdx -= 1
        if len(signal) == 1 or signal[0] == 'sslErrors':
            # Parameterless signal (sslErrors keeps its implicit signature).
            lines.insert(currentIdx, "%s = %spyqtSignal()\n" % (indent + name, module))
        else:
            type_str = ', '.join(signal[1:]).replace('::', '.')
            lines.insert(currentIdx, "%s = %spyqtSignal(%s)\n" % (indent + name, module, type_str))
        self._added_pyqtSignal = True
        currentIdx += 1
        line = lines[currentIdx]
        if line.lstrip().startswith('def '):
            # Keep a blank line between the declaration and the next method.
            lines.insert(currentIdx, "\n")
            return 2
        else:
            return 1
    def fix_connect(self, lines):
        """Refactor the pyqtSignal.connect()

        PyQt4 supports five versions of the connect() method:

        connect(SIP_QOBJECT, SIP_SIGNAL, SIP_QOBJECT, SIP_SLOT, Qt::ConnectionType=Qt::AutoConnection)
        connect(SIP_QOBJECT, SIP_SIGNAL, SIP_QOBJECT, SIP_SIGNAL, Qt::ConnectionType=Qt::AutoConnection)
        connect(SIP_QOBJECT, SIP_SIGNAL, SIP_SLOT, Qt::ConnectionType=Qt::AutoConnection)
        connect(SIP_QOBJECT, SIP_SIGNAL, SIP_SIGNAL, Qt::ConnectionType=Qt::AutoConnection)
        connect(SIP_QOBJECT, SIP_SIGNAL, SIP_PYCALLABLE, Qt::ConnectionType=Qt::AutoConnection)

        Args:
        lines -- source code
        """
        count = 0
        while count < len(lines):
            line = lines[count]
            if not self.is_code_line(line) or not '.connect(' in line:
                count += 1
                continue
            if not "SIGNAL(" in line:
                count += 1
                continue
            parts = line.split('.connect(')
            function = SIG_RE['fun_re'].search('('+parts[1])
            if function is None:
                count += 1
                continue
            # parse function arguments
            args = self.split_function(function.groups()[0])
            if len(args) < 3 or len(args) > 5:
                print('WARNING: Invalid connect() syntax:'+line)
                count += 1
                continue
            # parse signal argument
            if not "SIGNAL(" in args[1]:
                print('WARNING: Invalid connect() syntax:'+line)
                count += 1
                continue
            signal_obj = args[0]
            signal = self.remove_signal_slot(args[1])
            signal_fun = signal[0]
            signal_args = ''
            if len(signal) > 1 and signal[0] != 'sslErrors':
                signal_args = ', '.join(signal[1:]).replace('::', '.')
            # parse slot argument (which could be another signal)
            slot_obj = ''
            slot_args = ''
            slot_signal = ''
            if "SLOT(" in args[2] or "SIGNAL(" in args[2]:
                # 3-argument form: slot/signal lives on `self`.
                if "SIGNAL(" in args[2]:
                    slot_signal = args[2]
                    slot_obj = 'self'
                slot = self.remove_signal_slot(args[2])
                slot_fun = slot[0]
                if len(slot) > 1 and slot[0] != 'sslErrors':
                    slot_args = ', '.join(slot[1:]).replace('::', '.')
                other_args = ', '.join(args[3:])
            elif len(args) > 3 and ("SLOT(" in args[3] or "SIGNAL(" in args[3]):
                # 4-argument form: args[2] is the receiving object.
                if "SIGNAL(" in args[3]:
                    slot_signal = args[3]
                slot_obj = args[2]
                slot = self.remove_signal_slot(args[3])
                slot_fun = slot[0]
                if len(slot) > 1 and slot[0] != 'sslErrors':
                    slot_args = ', '.join(slot[1:]).replace('::', '.')
                other_args = ', '.join(args[4:])
            else:
                # Plain Python callable as the receiver.
                slot_fun = args[2]
                other_args = ', '.join(args[3:])
            # put everything together
            indent = self.get_token_indent(line)
            lines[count] = indent + '%s.%s' % (signal_obj, signal_fun)
            if signal_args:
                lines[count] += '[%s]' % signal_args
            lines[count] += '.connect('
            if slot_obj:
                lines[count] += '%s.' % slot_obj
            lines[count] += '%s' % slot_fun
            if slot_signal and slot_args:
                lines[count] += '[%s]' % slot_args
            if other_args:
                lines[count] += ', %s' % other_args
            lines[count] += ')\n'
            if slot_signal:
                # The target signal may not exist yet; declare it if needed.
                count += self.create_signal(lines, count, slot_signal)
            count += 1
    def fix_disconnect(self, lines):
        """Refactor the pyqtSignal.disconnect()

        PyQt4 supports three versions of the disconnect() method:

        disconnect(SIP_QOBJECT, SIP_SIGNAL, SIP_QOBJECT, SIP_SLOT)
        disconnect(SIP_QOBJECT, SIP_SIGNAL, SIP_QOBJECT, SIP_SIGNAL)
        disconnect(SIP_QOBJECT, SIP_SIGNAL, SIP_PYCALLABLE)

        PyQt4 does not support these versions of the disconnect() method (but this script does):

        connect(SIP_QOBJECT, SIP_SIGNAL, SIP_SLOT)
        connect(SIP_QOBJECT, SIP_SIGNAL, SIP_SIGNAL)

        Args:
        lines -- source code
        """
        for idx, line in enumerate(lines):
            if not self.is_code_line(line) or not '.disconnect(' in line:
                continue
            if not "SIGNAL(" in line:
                continue
            parts = line.split('.disconnect(')
            function = SIG_RE['fun_re'].search('('+parts[1])
            if function is None:
                continue
            # parse function arguments
            args = self.split_function(function.groups()[0])
            if len(args) < 3 or len(args) > 4:
                print('WARNING: Invalid disconnect() syntax:'+line)
                continue
            # parse signal argument
            if not "SIGNAL(" in args[1]:
                print('WARNING: Invalid disconnect() syntax:'+line)
                continue
            signal_obj = args[0]
            signal = self.remove_signal_slot(args[1])
            signal_fun = signal[0]
            signal_args = ''
            if len(signal) > 1 and signal[0] != 'sslErrors':
                signal_args = ', '.join(signal[1:]).replace('::', '.')
            # parse slot argument
            slot_obj = ''
            slot_args = ''
            slot_signal = ''
            if "SLOT(" in args[2] or "SIGNAL(" in args[2]:
                # 3-argument form: slot/signal lives on `self`.
                if "SIGNAL(" in args[2]:
                    slot_signal = args[2]
                    slot_obj = 'self'
                slot = self.remove_signal_slot(args[2])
                slot_fun = slot[0]
                if len(slot) > 1 and slot[0] != 'sslErrors':
                    slot_args = ', '.join(slot[1:]).replace('::', '.')
            elif len(args) > 3:
                # 4-argument form: args[2] is the receiving object.
                if "SLOT(" not in args[3] and "SIGNAL(" not in args[3]:
                    print('WARNING: Invalid disconnect() syntax:'+line)
                    continue
                if "SIGNAL(" in args[3]:
                    slot_signal = args[3]
                slot_obj = args[2]
                slot = self.remove_signal_slot(args[3])
                slot_fun = slot[0]
                if len(slot) > 1 and slot[0] != 'sslErrors':
                    slot_args = ', '.join(slot[1:]).replace('::', '.')
            else:
                # Plain Python callable as the receiver.
                slot_fun = args[2]
            # put everything together
            indent = self.get_token_indent(line)
            lines[idx] = indent + '%s.%s' % (signal_obj, signal_fun)
            if signal_args:
                lines[idx] += '[%s]' % signal_args
            lines[idx] += '.disconnect('
            if slot_obj:
                lines[idx] += '%s.' % slot_obj
            lines[idx] += '%s' % slot_fun
            if slot_signal and slot_args:
                lines[idx] += '[%s]' % slot_args
            lines[idx] += ')\n'
def fix_signal(self, lines):
"""
clean decorator arguments
"""
for idx, line in enumerate(lines):
if '@pyqtSignal' in line:
line = self.clean_signal_args(line)
line = line.replace("'str'", "str").replace('"str"', 'str')
lines[idx] = line
def fix_slot(self, lines):
"""
pyqtSignature decorator changed into pyqtSlot
clean decorator arguments
"""
for idx, line in enumerate(lines):
line = line.replace('@pyqtSignature', '@pyqtSlot')
if '@pyqtSlot' in line:
line = self.clean_signal_args(line)
line = line.replace("'str'", "str").replace('"str"', 'str')
lines[idx] = line
    def fix_emit(self, lines):
        """
        Refactor the pyqtSignal.emit() old-style into a new-style line.
        Attempts also to create unexisting signals

        Args:
        lines -- the list of source code lines
        """
        count = 0
        while count < len(lines):
            line = lines[count]
            if self.is_code_line(line) and '.emit(' in line and 'SIGNAL(' in line:
                parts = line.split('.emit(')
                function = SIG_RE['fun_re'].search('('+parts[1])
                if function is not None:
                    args = self.split_function(function.groups()[0])
                    # Re-balance any parentheses left unmatched by the split.
                    diff = diff_parenthesis(args[-1])
                    parenthesis = ')' * abs(diff)
                    if diff < 0:
                        li = args[-1].rsplit(')', abs(diff))
                        args[-1] = ''.join(li)
                    if len(args) == 2 and args[1] == '()':
                        # emit(SIGNAL("x"), ()) -> drop the empty payload.
                        args.pop()
                    lines[count] = '%s.%s.emit(%s)%s\n' % (parts[0], self.remove_signal_slot(args[0])[0], \
                                   ', '.join(args[1:]), parenthesis)
                    # Declare the signal on the class if it doesn't exist yet.
                    count += self.create_signal(lines, count, args[0])
            count += 1
    def fix_translations(self, lines):
        """Fix the translation syntax.

        QApplication.translate becomes QCoreApplication.translate, the
        obsolete .UnicodeUTF8 argument is dropped, and trUtf8 becomes tr.

        Args:
        lines -- the list of source code lines
        """
        count = 0
        while count < len(lines):
            line = lines[count]
            if self.is_code_line(line):
                if '.translate' in line:
                    # Rebuild the line part by part, swapping the class name
                    # in front of each '.translate' occurrence.
                    ln = ''
                    parts = line.split('.translate')
                    for part in parts:
                        if part.endswith('QApplication'):
                            # QtGui has been already changed to QtWidgets
                            if part.endswith('QtWidgets.QApplication'):
                                ln += part[:-22] + 'QtCore.QCoreApplication'
                            else:
                                ln += part[:-12] + 'QCoreApplication'
                        else:
                            ln += part
                        ln = ln + '.translate'
                    # Strip the extra '.translate' appended after the tail.
                    ln = ln[:-11]
                    if '.UnicodeUTF8' in ln:
                        parts = ln.split('.UnicodeUTF8')
                        ln = ''
                        for part in parts:
                            if part.endswith('QApplication'):
                                if part.endswith('QtWidgets.QApplication'):
                                    part = part[:-22]
                                else:
                                    part = part[:-12]
                                # Maintain multilines syntax
                                part = part.rstrip(',').rstrip().rstrip(',')
                            ln = ln + part
                    lines[count] = ln + '\n'
                elif '.trUtf8(' in line:
                    lines[count] = line.replace('trUtf8(', 'tr(')
            count += 1
    def fix_wheelevent(self, lines):
        """Fix the wheelEvent event.delta() syntax.

        Args:
        lines -- the list of source code lines
        """
        # The function name must be matched by the re:
        # (?<=def wheelEvent\(self,)(.*?)(?=\):))
        count = 0
        while count < len(lines):
            line = lines[count]
            if self.is_code_line(line):
                if 'wheelEvent(' in line:
                    match = WHEEL_RE.search(line)
                    if match is not None:
                        indent = self.get_token_indent(line)
                        # e.g. 'event.delta()' for `def wheelEvent(self, event):`
                        string = '%s.delta()' % match.group(0).strip()
                        count += 1
                        # Rewrite delta() calls inside the method body only.
                        while count < len(lines):
                            line = lines[count]
                            if self.is_code_line(line):
                                if self.get_token_indent(line) <= indent:
                                    # End of wheelEvent function
                                    count -= 1
                                    break
                                if string in line:
                                    lines[count] = line.replace('.delta()', '.angleDelta().y()')
                            count += 1
            count += 1
def fix_layoutmargin(self, lines):
    """Replace the QLayout method setMargin() by setContentsMargins()

    The margin() getter is mapped to getContentsMargins()[0].  Only
    references previously identified as grid/box layouts are rewritten.

    Args:
        lines -- the list of source code lines
    """
    layouts = []
    # Characters which may precede a layout reference in an expression
    m_re = re.compile(r'[, =\(\-+]')
    news = ('.setContentsMargins(', '.getContentsMargins()[0]')
    for line in lines:
        # Set the list of all layouts instanciated in the script
        if 'Layout(' in line:
            match = LAYOUT_RE.search(line.lstrip())
            if match is not None:
                name = match.group(3)
                # NOTE(review): presumably group(3) is the layout class
                # name prefix -- confirm against LAYOUT_RE's definition
                if name and name.endswith(('QGrid', 'QVBox', 'QHBox')):
                    # If matched, group(1) is the reference of the layout
                    layouts.append(match.group(1).strip())
    for idx, line in enumerate(lines):
        if self.is_code_line(line):
            if '.setMargin(' in line:
                parts = line.split('.setMargin(')
                if parts[0].lstrip() in layouts:
                    # setContentsMargins() takes the margin four times:
                    # (left, top, right, bottom)
                    val = parts[1].strip().rstrip(')').strip()
                    vals = ', '.join([val] * 4)
                    lines[idx] = '%s%s%s)\n' % (parts[0], news[0], vals)
            elif '.margin(' in line:
                ref = m_re.split(line.split('.margin')[0])[-1]
                if ref in layouts:
                    lines[idx] = line.replace('.margin()', news[1])
def fix_qdesktopservices(self, lines):
    """Replace QDesktopServices by QStandardPaths.
    This change is needed only for the methods displayName() and
    storageLocation()
    Args:
        lines -- the list of source code lines
    """
    # 'FIXME$' is a placeholder, renamed to 'FIXME' later by clean_file()
    fixme = "# FIXME$ Ambiguous syntax for QDesktopServices, can't refactor it.\n"
    # Known direct usages of the class
    dsks = ['QDesktopServices()', 'QtGui.QDesktopServices()']
    for line in lines:
        # Collect the references of QDesktopServices instances
        if 'QDesktopServices' in line:
            match = DSK_RE.search(line.lstrip())
            if match is not None:
                dsks.append(match.group(1).strip())
    count = 0
    while count < len(lines):
        line = lines[count]
        if not self.is_code_line(line):
            count += 1
            continue
        if '.displayName(' in line:
            method = '.displayName('
        elif '.storageLocation(' in line:
            method = '.storageLocation('
        else:
            count += 1
            continue
        parts = line.split(method)
        sub = parts[0].split('=')
        if len(sub) < 2:
            # Return value unused: nothing can be rewritten safely
            count += 1
            continue
        if sub[1].strip() in dsks:
            # The location argument, e.g. 'QDesktopServices.DataLocation'
            val = parts[1].strip().rstrip(')').strip()
            try:
                loc = val.split('.')[1]
            except IndexError:
                # No enum member recognized: flag for manual fixing
                indent = self.get_token_indent(line)
                lines.insert(count, '%s%s' % (indent, fixme))
                count += 1
            else:
                # storageLocation() became writableLocation() in Qt5
                method = method.replace('storage', 'writable')
                cls = 'QStandardPaths'
                lines[count] = '%s = %s%s%s.%s)\n' % (sub[0].rstrip(), cls, method, cls, loc)
                # Remember to add the QStandardPaths import later
                self.modified['QStandardPaths'] = True
        count += 1
def fix_qdate(self, lines):
    """Change QDate.setYMD() method to QDate.setDate().
    Args:
        lines -- the list of source code lines
    """
    # First handle classes which subclass QDate (self.setYMD calls)
    gen = self.find_subclassed_class(lines, 'QDate')
    while 1:
        try:
            num = next(gen)
            self.fix_instance_qdate(lines, num + 1)
        except StopIteration:
            break
    # Then handle plain QDate instances
    dates = []
    for idx, line in enumerate(lines):
        if not self.is_code_line(line):
            continue
        if 'QDate(' in line:
            match = DATE_RE.search(line.lstrip())
            if match is not None:
                # group(1) is the reference of the QDate instance
                dates.append(match.group(1).strip())
        if '.setYMD(' in line:
            inst = line.split('.setYMD')[0].lstrip()
            if inst in dates:
                lines[idx] = line.replace('setYMD', 'setDate')
def fix_instance_qdate(self, code, start):
    """Change QDate.setYMD() method to QDate.setDate() into a class
    which inherits QDate.

    Args:
        code -- the list of source code lines
        start -- the number of the second line of the class
    """
    offset = start
    for line in code[start:]:
        # Stop at the next class definition
        if self.is_class(line):
            break
        if 'self.setYMD(' in line:
            code[offset] = line.replace('setYMD', 'setDate')
        offset += 1
def fix_qgraphicsitem(self, lines):
    """Remove the scene from the arguments of a QGraphicsItem.
    The QGraphicsScene is identified with these rules:
    - Explicit: `scene=foo` or `scene` or `self.scene`
    - If args[-2] is None then args[-1] is scene
    - 1 arg: no scene possible
    - 2 args: if no keyword `parent` the scene is not identified,
      a FIXME will be added
    - 3 args: args[2] is scene
    - 4 args: no scene possible
    - 6 args: args[5] is scene
    Args:
        lines -- the list of source code lines
    """
    # TODO replace scale(float x, float y) to setTransform(QMatrix) or
    # setScale(float) if float x == float y
    item_classes = ('QAbstractGraphicsShapeItem',
                    'QGraphicsEllipseItem',
                    # QGraphicsItemGroup is reached through 'QGraphicsItem'
                    'QGraphicsItem',
                    'QGraphicsLineItem',
                    'QGraphicsPathItem',
                    'QGraphicsPixmapItem',
                    'QGraphicsPolygonItem',
                    'QGraphicsRectItem',
                    'QGraphicsSimpleTextItem',
                    'QGraphicsTextItem')
    for item_class in item_classes:
        self.find_graphics_items(lines, item_class)
def find_graphics_items(self, code, obj):
    """Rewrite the instantiations of one QGraphicsItem subclass.

    In Qt5 a QGraphicsItem no longer receives the QGraphicsScene in its
    constructor.  When the scene can be identified among the arguments
    (rules in fix_qgraphicsitem), it is removed and a
    `scene.addItem(ref)` line is inserted after the instantiation;
    otherwise a FIXME comment is inserted.

    Args:
        code -- the list of source code lines
        obj -- the item class name, e.g. 'QGraphicsRectItem'
    """
    fixme = "# FIXME$ Can't identify the QGraphicsScene in the arguments "\
            "of the QGraphicsItem"
    count = 0
    while count < len(code):
        scene = False
        line = code[count]
        if not self.is_code_line(line) or line.lstrip().startswith(('import ', 'from ')):
            count += 1
            continue
        if obj in line:
            if self.is_class(line):
                # Subclass definition: rewrite its __init__ separately
                count = self.refactor_qgraphics_subclass(code, count, obj)
                continue
            parts = line.split(obj)
            if parts[1].startswith('Group'):
                # 'QGraphicsItem' also matches 'QGraphicsItemGroup'
                obj += 'Group'
                parts = line.split(obj)
            if not parts[1].lstrip().startswith('('):
                # Not instantiated
                count += 1
                continue
            refs = parts[0].split('=')
            if len(refs) < 2:
                # Unknown object
                count += 1
                continue
            ref = refs.pop(0)
            ind = self.get_token_indent(line)
            args = self.get_args(parts[1])
            scene, args = self.find_keyword('scene', args)
            if not scene:
                # 0: ()
                # 1: (parent)
                # 1: (object)
                if len(args) <= 1:
                    count += 1
                    continue
                # 2: (*args, **kwargs)
                # 2: (object, parent)
                # 2: (parent, scene) -- possible problem
                elif len(args) == 2:
                    if args[0] in ('*args', '* args') and args[1] in ('**kwargs', '** kwargs'):
                        # (*args, **kwargs)
                        count += 1
                        continue
                    elif args[-2] == 'None':
                        # (parent=None, scene)
                        scene = args.pop()
                    else:
                        parent_index = self.find_keyword_index('parent', args)
                        if parent_index == 0:
                            # (parent, scene)
                            scene = args.pop()
                        elif parent_index == 1:
                            # (object, parent)
                            count += 1
                            continue
                        else:
                            # (object, parent) or (parent, scene)
                            code.insert(count, '%s%s\n' % (ind, fixme))
                            count += 2
                            continue
                # 3: (object, parent, scene)
                elif len(args) == 3:
                    scene = args.pop()
                # 4: (x, y, w, h)
                # 5: (x, y, w, h, parent)
                elif len(args) == 4 or len(args) == 5:
                    count += 1
                    continue
                # 6: (x, y, w, h, parent, scene)
                elif len(args) == 6:
                    scene = args.pop()
                else:
                    code.insert(count, '%s%s\n' % (ind, fixme))
                    count += 2
                    continue
            # Rewrite the call without the scene argument
            code[count] = line.replace(parts[1], '(%s)\n' % ', '.join(args))
            if scene and scene != 'None':
                # Add the item to the scene right after the instantiation
                string = '%s%s.addItem(%s)\n' % (ind, scene, ref.strip())
                count += 1
                code.insert(count, string)
        count += 1
def refactor_qgraphics_subclass(self, lines, count, item):
    """Remove the scene argument from the __init__ of a QGraphicsItem subclass.

    Rewrites the `super(...).__init__(...)` or `Item.__init__(...)`
    call and inserts a conditional `scene.addItem(self)` when the scene
    could be identified (same rules as fix_qgraphicsitem, shifted by
    the leading `self` argument).

    Args:
        lines -- the list of source code lines
        count -- the index of the class definition line
        item -- the subclassed QGraphicsItem class name
    Returns:
        int(index of the next line to process)
    """
    fixme = "# FIXME$ Can't identify the QGraphicsScene in arguments of "\
            "the QGraphicsItem"
    cls = self.get_classname(lines[count])
    count += 1
    indent = ''
    while count < len(lines):
        scene = False
        line = lines[count]
        if not self.is_code_line(line):
            count += 1
            continue
        if line.lstrip().startswith('def __init__'):
            indent = self.get_token_indent(line)
            count += 1
            continue
        elif line.lstrip().startswith('super(%s' % cls):
            ind = self.get_token_indent(line)
            parts = line.split('__init__')
            args = self.get_args(parts[1])
        elif '%s.__init__' % item in line:
            ind = self.get_token_indent(line)
            parts = line.split('__init__')
            args = self.get_args(parts[1])
        elif self.get_token_indent(line) < indent:
            # Leaving the class
            return count + 1
        else:
            count += 1
            continue
        scene, args = self.find_keyword('scene', args)
        if not scene:
            # 0: (self)
            # 1: (self, parent)
            # 1: (self, object)
            if len(args) <= 2:
                return count + 1
            # 2: (self, *args, **kwargs)
            # 2: (self, object, parent)
            # 2: (self, parent, scene) -- possible problem
            elif len(args) == 3:
                if args[1] in ('*args', '* args') and args[2] in ('**kwargs', '** kwargs'):
                    # (self, *args, **kwargs)
                    return count + 1
                elif args[-2] == 'None':
                    # (self, parent=None, scene)
                    scene = args.pop()
                else:
                    parent_index = self.find_keyword_index('parent', args)
                    if parent_index == 1:
                        # (self, parent, scene)
                        scene = args.pop()
                    elif parent_index == 2:
                        # (self, object, parent)
                        return count + 1
                    else:
                        # (self, object, parent) or (self, parent, scene)
                        lines.insert(count, '%s%s\n' % (ind, fixme))
                        return count + 2
            # 3: (self, object, parent, scene)
            elif len(args) == 4:
                scene = args.pop()
            # 4: (self, x, y, w, h)
            # 5: (self, x, y, w, h, parent)
            elif len(args) == 5 or len(args) == 6:
                return count + 1
            # 6: (self, x, y, w, h, parent, scene)
            elif len(args) == 7:
                scene = args.pop()
            else:
                lines.insert(count, '%s%s\n' % (ind, fixme))
                return count + 2
        # Rewrite the __init__ call without the scene argument
        lines[count] = line.replace(parts[1], '(%s)\n' % ', '.join(args))
        if scene != 'None':
            count += 1
            lines.insert(count, '%sif %s is not None: %s.addItem(self)\n' % (ind, scene, scene))
            return count + 1
        return count + 1
    return count + 1
def get_args(self, string):
    """Return the stripped argument strings of a call such as '(a, b)'.

    Args:
        string -- the text of the call, parenthesis included
    """
    # Drop the surrounding parenthesis, then split on commas
    inner = string.strip()[1:-1]
    return [piece.strip() for piece in inner.split(',')]
def find_keyword(self, keyword, args):
    """Extract a keyword argument from an argument list.

    Args:
        keyword -- the keyword name, e.g. 'scene'
        args -- the list of argument strings (modified in place)
    Returns:
        tuple(value or False, args) where the matched argument has been
        removed from args
    """
    assigned = (keyword + '=', keyword + ' =')
    bare = (keyword, 'self.' + keyword)
    found = False
    for pos, candidate in enumerate(args):
        if candidate.startswith(assigned):
            # keyword=value form: keep only the value
            found = args.pop(pos).split('=')[1].strip()
            break
        if candidate in bare:
            found = args.pop(pos)
            break
    return found, args
def find_keyword_index(self, keyword, args):
    """Return the index of a keyword argument in *args*, or -1.

    Matches both the `keyword=value` form and the bare `keyword` /
    `self.keyword` forms.
    """
    prefixes = (keyword + '=', keyword + ' =')
    literals = (keyword, 'self.' + keyword)
    for pos, candidate in enumerate(args):
        if candidate.startswith(prefixes) or candidate in literals:
            return pos
    return -1
def fix_qheader(self, lines):
    """Rename some QHeaderView's methods.

    setMovable/isMovable, setClickable/isClickable and
    setResizeMode/resizeMode were renamed to their `section*`
    counterparts in Qt5.  Only references known to be header views are
    rewritten.

    Args:
        lines -- the list of source code lines
    """
    # References which are known to be QHeaderView instances
    headers = ['horizontalHeader()', 'verticalHeader()']
    for line in lines:
        if '.horizontalHeader()' in line or '.verticalHeader()' in line:
            try:
                ref, _ = line.split('=')
            # was a bare `except:`; only the 2-value unpacking of
            # split('=') can legitimately fail here
            except ValueError:
                # Not a simple `ref = obj.header()` assignment
                pass
            else:
                headers.append(ref.strip())
    headers = tuple(headers)
    olds = ('.setMovable', '.isMovable',
            '.setClickable', '.isClickable',
            '.setResizeMode', '.resizeMode')
    news = ('.setSectionsMovable', '.sectionsMovable',
            '.setSectionsClickable', '.sectionsClickable',
            '.setSectionResizeMode', '.sectionResizeMode')
    for old, new in zip(olds, news):
        gen = self.find_string(lines, old)
        while 1:
            try:
                num = next(gen)
                begin, _ = lines[num].split(old)
                # Rewrite only calls made on a known header reference
                if begin.endswith(headers):
                    lines[num] = lines[num].replace(old, new)
            except StopIteration:
                break
def fix_qinputdialog(self, lines):
    """Replace the method getInteger() by getInt() in QInputDialog class.

    Args:
        lines -- the list of source code lines (modified in place)
    """
    target = 'QInputDialog.getInteger('
    for pos, text in enumerate(lines):
        if target in text:
            lines[pos] = text.replace('.getInteger(', '.getInt(')
def fix_qchar(self, lines):
    """Replace QChar() by unichr() for Python 2 and chr() for Python 3.
    Args:
        lines -- the list of source code lines
    """
    is_qchar = False
    for idx, line in enumerate(lines):
        if self.is_code_line(line):
            # TODO: Convert this to use regular expressions.
            # use PyQt5 since this method is called after change_import_lines
            line = line.replace('PyQt5.QtCore.QChar', 'QChar').replace('PyQt5.Qt.QChar', 'QChar')\
                .replace('QtCore.QChar', 'QChar').replace('Qt.QChar', 'QChar')
            lines[idx] = line
            if '].connect(' in line or 'pyqtSignal(' in line:
                # In signal signatures QChar must become a string argument
                line = line.replace("'QChar'", "QChar").replace('"QChar"', 'QChar')\
                    .replace("QChar", "'QChar'")
                lines[idx] = line
            # Any remaining unquoted QChar needs the compatibility shim
            if 'QChar' in line.replace("'QChar'", "").replace('"QChar"', ''):
                is_qchar = True
    if is_qchar:
        # Insert the unichr/chr shim before the first real code line
        for idx in range_(len(lines)):
            if not self.is_code_line(lines[idx]) or lines[idx].lstrip().startswith(('import ', 'from ', '__')):
                continue
            lines.insert(idx, "\n")
            ind = self.find_next_indent(lines[idx+1:])
            if not ind:
                ind = " "
            text = "try:\n%sQChar = unichr\nexcept NameError:\n"\
                "%s# Python 3\n%sQChar = chr\n" % (ind, ind, ind)
            lines.insert(idx, text)
            break
def fix_qstring(self, lines):
    """Replace QString() by unicode() for Python 2 and str() for Python 3.
    Also updates QString and QStringList usage as signal arguments.
    Args:
        lines -- the list of source code lines
    """
    is_qstring = False
    is_qstring_list = False
    for idx, line in enumerate(lines):
        if self.is_code_line(line):
            # TODO: This does not handle QStringListModel properly.
            # TODO: Convert this to use regular expressions.
            # use PyQt5 since this method is called after change_import_lines
            line = line.replace('PyQt5.QtCore.QString', 'QString').replace('PyQt5.Qt.QString', 'QString')\
                .replace('QtCore.QString', 'QString').replace('Qt.QString', 'QString')
            lines[idx] = line
            if '].connect(' in line or 'pyqtSignal(' in line:
                # In signal signatures QString/QStringList must be quoted
                line = line.replace("'QString'", "QString").replace('"QString"', 'QString')\
                    .replace("'QStringList'", "QStringList").replace('"QStringList"', 'QStringList')\
                    .replace("QString", "'QString'").replace("'QString'List", "'QStringList'")\
                    .replace("'QStringList'Model", "QStringListModel")
                lines[idx] = line
            # Detect remaining unquoted usages which need a shim
            if 'QString' in line.replace('QStringListModel', '').replace('QStringList', '')\
                .replace("'QString'", "").replace('"QString"', ''):
                is_qstring = True
            if 'QStringList' in line.replace('QStringListModel', '')\
                .replace("'QStringList'", "").replace('"QStringList"', ''):
                is_qstring_list = True
    if is_qstring or is_qstring_list:
        # Insert the shims before the first real code line
        for idx in range_(len(lines)):
            if not self.is_code_line(lines[idx]) or lines[idx].lstrip().startswith(('import ', 'from ', '__')):
                continue
            lines.insert(idx, "\n")
            if is_qstring_list:
                text = "QStringList = list\n"
                lines.insert(idx, text)
            if is_qstring:
                ind = self.find_next_indent(lines[idx+1:])
                if not ind:
                    ind = " "
                text = "try:\n%sQString = unicode\nexcept NameError:\n"\
                    "%s# Python 3\n%sQString = str\n" % (ind, ind, ind)
                lines.insert(idx, text)
            break
def fix_qglobal(self, lines):
    """Replace calls to qInstallMsgHandler() with calls to
    qInstallMessageHandler().

    Args:
        lines -- the list of source code lines (modified in place)
    """
    for pos, text in enumerate(lines):
        if not self.is_code_line(text):
            continue
        lines[pos] = text.replace('qInstallMsgHandler(', 'qInstallMessageHandler(')
def fix_qvariant(self, lines):
    """Remove calls to obsolete QVariant conversion functions.

    Each `.method()` listed in QVARIANT_OBSOLETE_METHODS is simply
    stripped from the code lines.

    Args:
        lines -- the list of source code lines (modified in place)
    """
    for pos, text in enumerate(lines):
        if not self.is_code_line(text):
            continue
        for name in QVARIANT_OBSOLETE_METHODS:
            text = text.replace('.' + name + '()', '')
        lines[pos] = text
def find_subclassed_class(self, code, classname):
    """Find a class definition which subclasses a Qt class.

    Args:
        code -- the list of source code lines
        classname -- the name of the subclassed class
    Yields:
        int(number of each matching class definition line)
    """
    for idx, line in enumerate(code):
        if self.is_class(line):
            try:
                # The base class list follows the first '('
                if classname in line.split('(')[1]:
                    yield idx
            # was a bare `except:`; only the [1] subscript can fail,
            # when the class is defined without parenthesis
            except IndexError:
                pass
def find_string(self, code, string):
    """Find a string into a source code.

    Args:
        code -- the list of source code lines
        string -- the substring to look for
    Yields:
        int(number of each line containing the substring)
    """
    for num, text in enumerate(code):
        if string in text:
            yield num
def clean_args(self, string):
    """Returns the list of arguments of an emit() method.

    The first comma-separated element (the signal itself) is skipped.

    Args:
        string -- The last part of the line code
    """
    pieces = string.split(',')
    if len(pieces) < 2:
        return []
    return [piece.strip().strip(')').strip() for piece in pieces[1:]]
def count_ref(self, string):
    """Return the number of comma-separated references in *string*."""
    # Equivalent to len(string.split(','))
    return string.count(',') + 1
def replace_qApp(self, lines):
    """Replace qApp usage with QApplication.static_method() or QApplication.instance().method().
    Args:
        lines -- source code
    """
    for idx, line in enumerate(lines):
        if not self.is_code_line(line) or not 'qApp' in line:
            continue
        if line.lstrip().startswith(('import ', 'from ')):
            line = self.replace_module(line, 'qApp', 'QApplication')
        else:
            # use QtWidgets.qApp since this method is called after change_module_name
            # Static methods are called on the QApplication class itself
            for func in QAPP_STATIC_METHODS:
                line = re.sub(r'(\A|[^a-zA-Z0-9_.\'"]|Qt\.|QtWidgets\.)qApp\.'+func+r'(\Z|[^a-zA-Z0-9_])',
                              r'\1QApplication.'+func+r'\2', line)
            # Everything else goes through the singleton instance
            line = re.sub(r'(\A|[^a-zA-Z0-9_.\'"]|Qt\.|QtWidgets\.)qApp(\Z|[^a-zA-Z0-9_])',
                          r'\1QApplication.instance()\2', line)
        lines[idx] = line
def replace_classnames(self, lines):
    """Rename some classe's names.
    QMatrix to QTransform
    QIconEngineV2 to QIconEngine
    Args:
        lines -- source code
    """
    # TODO: Convert this to use regular expressions like in replace_qApp above,
    # so that only the appropriate instances of the old names are converted.
    renames = (('QMatrix', 'QTransform'), ('QIconEngineV2', 'QIconEngine'))
    for pos, text in enumerate(lines):
        if not self.is_code_line(text):
            continue
        for old_name, new_name in renames:
            text = text.replace(old_name, new_name)
        lines[pos] = text
def is_code_line(self, line):
    """Returns True if a line is not empty, nor a comment, nor a docstring.

    Args:
        line -- the line code
    Returns:
        True if line is a valid code line
    """
    if not line.strip():
        return False
    return not (self.is_comment(line) or self.is_string(line) or self.is_docstring(line))
def is_comment(self, line):
    """Returns True if a line is a comment.

    Args:
        line -- the line code
    """
    stripped = line.lstrip()
    # An empty line is not a comment
    return bool(stripped) and stripped[0] == '#'
def is_string(self, line):
    """Returns True if a line is a string.

    Args:
        line -- the line code
    """
    stripped = line.lstrip()
    return stripped.startswith('"') or stripped.startswith("'")
def is_docstring(self, line):
    """Returns True if a line is a docstring.

    Args:
        line -- the line code
    """
    stripped = line.lstrip()
    return stripped.startswith('"""') or stripped.startswith("'''")
def is_class(self, line):
    """Returns True if a line is a class definition line.

    Args:
        line -- the line code
    """
    return line.lstrip()[:6] == 'class '
def is_function(self, line):
    """Returns True if a line is a function definition line.

    Args:
        line -- the line code
    """
    return line.lstrip()[:4] == 'def '
def get_classname(self, string):
    """Returns the name of a class, or None when no class name matches.

    Args:
        string -- the class's definition line code
    """
    found = CLS_RE.search(string)
    return found.group(0).strip() if found is not None else None
def get_token_indent(self, string):
    """Returns the indentation of a line ('' when there is none).

    Args:
        string -- the line
    """
    for tok in tokenize.generate_tokens(StringIO(string).readline):
        if tok.type == tokenize.INDENT:
            # The INDENT token's text is the leading whitespace itself
            return tok.string
    return ''
def find_next_indent(self, lines):
    """Returns the first indentation found into a list of lines.

    Args:
        lines -- the list of lines
    """
    for text in lines:
        found = self.get_token_indent(text)
        if found:
            return found
    return ''
def count_parenthesis(self, line, start, end):
    """Count the occurrences of an open parenthesis into a string.

    Args:
        line -- the string
        start -- the word where the count begins
        end -- the word where the count finishes
    Returns:
        int(occurences), or None when *end* is never found
    """
    # Initialize before the loop: previously `count` was only assigned
    # when the *start* token was seen, so a '(' appearing before it
    # raised UnboundLocalError.  The counter is still reset at *start*.
    count = 0
    tokens = tokenize.generate_tokens(StringIO(line).readline)
    for _, st, _, _, _ in tokens:
        if st == start:
            count = 0
        elif st == '(':
            count += 1
        elif st == end:
            return count
    return None
def find_closing_parenthesis(self, line, prefix=None):
    """Find the closing parenthesis according to a given opening parenthesis.
    Args:
        line -- one logical line of code
        prefix -- the word that precedes the opening parenthesis
    Returns:
        tuple(ocol, ccol) where ocol is the column of the opening parenthesis
        and ccol is the column of the closing parenthesis;
        (len(line), len(line)) when no match is found
    """
    # Without a prefix, matching starts at the first parenthesis
    begin = not prefix
    count = 0
    ocol = ccol = 0
    tokens = tokenize.generate_tokens(StringIO(line).readline)
    for typ, st, bg, _, _ in tokens:
        if typ == tokenize.NL:
            # Physical line break: token columns restart at 0, so the
            # offsets accumulate to keep absolute positions in *line*
            if not begin:
                ocol += bg[1]+1
                ccol += bg[1]+1
        elif prefix and st == prefix:
            begin = True
            if prefix == '(':
                # The prefix itself is the opening parenthesis
                count += 1
                ocol += bg[1]
        elif begin and st == '(':
            if not count:
                # First opening parenthesis after the prefix
                ocol += bg[1]
            count += 1
        elif count and st == ')':
            count -= 1
            if not count:
                # Balanced again: this is the matching closing paren
                return ocol, ccol+bg[1]
    return len(line), len(line)
def remove_fromUtf8(self, lines):
    """Remove calls to QString.fromUtf8 often redefined as _fromUtf8
    Args:
        lines -- the list of source code lines
    """
    count = 0
    while count < len(lines):
        line = lines[count]
        if not self.is_code_line(line):
            count += 1
            continue
        # remove the definition of the _fromUtf8 function or redefine it
        if line.strip() == '_fromUtf8 = QtCore.QString.fromUtf8':
            if count > 0 and lines[count-1].strip() == 'try:' and \
                    count+1 < len(lines) and lines[count+1].strip() == 'except AttributeError:':
                if count+2 < len(lines) and lines[count+2].strip() == '_fromUtf8 = lambda s: s':
                    # try/except block with a lambda fallback: drop it all
                    i, j = count-1, count+3
                    if j < len(lines) and lines[j].strip() == '':
                        j += 1
                    lines[i:j] = []
                    count -= 1
                    continue
                elif count+3 < len(lines) and lines[count+2].strip() == 'def _fromUtf8(s):' and \
                        lines[count+3].strip() == 'return s':
                    # try/except block with a def fallback: drop it all
                    i, j = count-1, count+4
                    if j < len(lines) and lines[j].strip() == '':
                        j += 1
                    lines[i:j] = []
                    count -= 1
                    continue
            else:
                # Bare assignment: redefine _fromUtf8 as the identity
                indent = self.get_token_indent(line)
                lines[count] = indent + '_fromUtf8 = lambda s: s\n'
                continue
        # Normalize every spelling to _fromUtf8(...)
        line = line.replace("PyQt4.QtCore.QString.fromUtf8(", "_fromUtf8(")\
            .replace("PyQt4.Qt.QString.fromUtf8(", "_fromUtf8(")\
            .replace("QtCore.QString.fromUtf8(", "_fromUtf8(")\
            .replace("Qt.QString.fromUtf8(", "_fromUtf8(")\
            .replace("QString.fromUtf8(", "_fromUtf8(")
        # Then unwrap each _fromUtf8(arg) call, keeping only arg
        while True:
            open_idx, close_idx = self.find_closing_parenthesis(line, '_fromUtf8')
            if open_idx >= len(line):
                break
            # 9 == len('_fromUtf8')
            line = line[:open_idx-9] + line[open_idx+1:close_idx] + line[close_idx+1:]
        lines[count] = line
        count += 1
def get_signal(self, strings):
    """Reassemble a comma-split SIGNAL("...") string.

    Args:
        strings -- the comma-split pieces; the first one is consumed
    Returns:
        tuple(signal, remaining pieces), or None when the signal string
        is never terminated
    """
    sig = strings.pop(0)
    for idx, chunk in enumerate(strings):
        # The signal is complete once the closing quote+paren is seen
        if sig.endswith(('")', "')")):
            return sig, strings[idx:]
        sig = '%s, %s' % (sig, chunk)
    return None
def refactor_signal(self, string):
    """Return the PyQt5 form of a signal found in *string*, or False.

    The ambiguous default signals are mapped to their explicit
    bracketed overloads; anything else is normalized by clean_signal().
    """
    match = SIG_RE['sig_re'].search(string)
    if match is None:
        return False
    sig = match.group(0)
    overloads = {'clicked(bool)': 'clicked[bool]',
                 'clicked()': 'clicked[()]',
                 'triggered(bool)': 'triggered[bool]',
                 'triggered()': 'triggered[()]'}
    try:
        return overloads[sig]
    except KeyError:
        return self.clean_signal(sig)
def get_slot(self, seq):
    """Extract the slot name from a sequence of strings.

    The matched element is removed from *seq*.  An empty string is
    returned when no slot is found.
    """
    slot = ''
    for item in seq:
        if 'SLOT' not in item:
            continue
        match = SIG_RE['slot_re'].search(item)
        if match is not None:
            # Keep only the name, without the argument list
            slot = match.group(0).split('(')[0]
            seq.remove(item)
            break
    return slot.strip()
def clean_signal_args(self, signal):
    """Normalize the C++ argument types inside a signal signature.

    `const`, pointers and references are removed; QString and
    PyQt_PyObject are quoted as PyQt5 expects, while `const char*`
    arguments are preserved for PyQt4.
    """
    if self._pyqt5:
        signal = signal.replace('const char*', 'str').replace('const char *', 'str')
    else:
        # Protect the const char* forms from the const/star removal below
        signal = signal.replace('const char*', 'const_char_star_arg')\
            .replace('const char *', 'const_char_space_star_arg')
    signal = signal.replace(' const ', '').replace('const ', '')
    # Strip pointer and reference markers in every spacing variant
    for marker in (' * ', ' *', '* ', '*', ' & ', ' &', '& ', '&'):
        signal = signal.replace(marker, '')
    signal = signal.replace("PyQt_PyObject", "'PyQt_PyObject'")
    if self._pyqt5:
        # TODO: Convert this to use regular expressions.
        # Normalize qualified names first, then quote the bare QString;
        # the last two replaces undo the over-quoting of QStringList*
        signal = signal.replace("PyQt4.QtCore.QString", "QString").replace("PyQt4.Qt.QString", "QString")\
            .replace("QtCore.QString", "QString").replace("Qt.QString", "QString")\
            .replace("QString", "'QString'").replace("'QString'List", "'QStringList'")\
            .replace("'QStringList'Model", "QStringListModel")
    else:
        # Restore the protected const char* forms, quoted
        signal = signal.replace('const_char_star_arg', '"const char*"')\
            .replace('const_char_space_star_arg', '"const char *"')
    return signal
def clean_signal(self, signal):
    """Return the PyQt5 form of a signal: cleaned arguments and the
    parenthesis converted to the bracketed overload syntax."""
    cleaned = self.clean_signal_args(signal)
    # '()' disappears entirely; remaining parens become brackets
    return cleaned.replace('()', '').replace('(', '[').replace(')', ']')
def replace_module(self, line, old_mod, new_mod=None):
    """Rename or remove one module name in an import line.

    The name is matched surrounded by commas, spaces, end of line or a
    line-continuation backslash, so that only whole names are touched.

    Args:
        line -- the import line
        old_mod -- the module name to replace
        new_mod -- the replacement name, or None to remove old_mod
    """
    # TODO: Convert this to use regular expressions.
    if new_mod:
        line = line.replace(','+old_mod+',', ','+new_mod+',')\
            .replace(', '+old_mod+',', ', '+new_mod+',')\
            .replace(','+old_mod+'\n', ','+new_mod+'\n')\
            .replace(', '+old_mod+'\n', ', '+new_mod+'\n')\
            .replace(','+old_mod+'\\', ','+new_mod+'\\')\
            .replace(','+old_mod+' \\', ','+new_mod+' \\')\
            .replace(', '+old_mod+'\\', ', '+new_mod+'\\')\
            .replace(', '+old_mod+' \\', ', '+new_mod+' \\')\
            .replace(' '+old_mod+', ', ' '+new_mod+', ')\
            .replace(' '+old_mod+',', ' '+new_mod+',')\
            .replace(' '+old_mod+'\n', ' '+new_mod+'\n')\
            .replace(' '+old_mod+'\\', ' '+new_mod+'\\')\
            .replace(' '+old_mod+' \\', ' '+new_mod+' \\')
    else:
        line = line.replace(','+old_mod+',', ',')\
            .replace(', '+old_mod+',', ',')\
            .replace(','+old_mod+'\n', '\n')\
            .replace(', '+old_mod+'\n', '\n')\
            .replace(','+old_mod+'\\', '\\')\
            .replace(','+old_mod+' \\', ' \\')\
            .replace(', '+old_mod+'\\', '\\')\
            .replace(', '+old_mod+' \\', ' \\')\
            .replace(' '+old_mod+', ', ' ')\
            .replace(' '+old_mod+',', ' ')\
            .replace(' '+old_mod+'\n', '\n')\
            .replace(' '+old_mod+'\\', '\\')\
            .replace(' '+old_mod+' \\', ' \\')
    # Remove empty in between lines
    return L_SEP.join(l for l in line.split(L_SEP) if l.strip()) + L_SEP
def change_import_lines(self, lines):
    """Refactor the import's lines.
    Args:
        lines -- list of lines of source code
    Returns:
        list(lines)
    """
    news = []
    count = 0
    def set_qstandardpaths(txt):
        # Emit the QStandardPaths import requested by fix_qdesktopservices()
        if self.modified['QStandardPaths']:
            news.append(txt.replace('PyQt4', 'PyQt5') + '.QtCore import QStandardPaths\n')
            self.modified['QStandardPaths'] = False
    while count < len(lines):
        line = lines[count]
        if not self.is_code_line(line):
            news.append(line)
            count += 1
            continue
        ls_line = line.lstrip()
        if line.lstrip().startswith(('import ', 'from ')):
            line = line.rstrip() + '\n'
            # SIGNAL/SLOT disappear; pyqtSignal may replace SIGNAL
            if self._added_pyqtSignal:
                line = self.replace_module(line, 'SIGNAL', 'pyqtSignal')
            else:
                line = self.replace_module(line, 'SIGNAL', '')
            line = self.replace_module(line, 'SLOT', '')
            if self._pyqt5:
                # QStringList must be removed before QString
                line = self.replace_module(line, 'QStringList', '')
                line = self.replace_module(line, 'QString', '')
            if line.strip() == 'import' or line.rstrip().endswith(' import'):
                # Every imported name was removed: drop the line
                count += 1
                continue
            if not self._pyqt5:
                news.append(line)
                count += 1
                continue
        if ls_line.startswith('from PyQt4.QtCore ') and self.modified['QStandardPaths']:
            news.append(line.replace('PyQt4', 'PyQt5').rstrip() + ', QStandardPaths\n')
            self.modified['QStandardPaths'] = False
        elif ls_line.startswith('from PyQt4.QtCore ') and 'QChar' in line:
            # 25 == len('from PyQt4.QtCore import ')
            elems = [c.strip() for c in line[25:].split(',')]
            elems.remove('QChar')
            if elems:
                news.append('from PyQt5.QtCore import ' + ', '.join(elems) + '\n')
        elif ls_line.startswith('from PyQt4 import '):
            line = self.refactor_modules_import(line)
            if line:
                txt = self.reindent_import_line(line)
                news.append(txt)
                set_qstandardpaths(line.split(' import ')[0])
        elif ls_line.startswith('from PyQt4.Qt import '):
            parts = line.split('import ')
            # Dispatch each imported class to its PyQt5 module
            core, gui, wdg, pr, md, ogl, cm = self.sort_qt_classes(parts[1])
            if core:
                stcore = "".join([parts[0].replace('PyQt4.Qt ',
                                  'PyQt5.QtCore import '), ', '.join(core)])
                txt = self.reindent_import_line(stcore)
                news.append(txt)
            if gui:
                stgui = "".join([parts[0].replace('PyQt4.Qt ',
                                 'PyQt5.QtGui import '), ', '.join(gui)])
                txt = self.reindent_import_line(stgui)
                news.append(txt)
            if wdg:
                stwdg = "".join([parts[0].replace('PyQt4.Qt ',
                                 'PyQt5.QtWidgets import '), ', '.join(wdg)])
                txt = self.reindent_import_line(stwdg)
                news.append(txt)
                self._has_qtwidget_import = True
            if pr:
                stpr = "".join([parts[0].replace('PyQt4.Qt ',
                                'PyQt5.QtPrintSupport import '), ', '.join(pr)])
                txt = self.reindent_import_line(stpr)
                news.append(txt)
            if md:
                stmd = "".join([parts[0].replace('PyQt4.Qt ',
                                'PyQt5.QtMultimedia import '), ', '.join(md)])
                txt = self.reindent_import_line(stmd)
                news.append(txt)
            if ogl:
                stogl = "".join([parts[0].replace('PyQt4.Qt ',
                                 'PyQt5.QtOpenGL import '), ', '.join(ogl)])
                txt = self.reindent_import_line(stogl)
                news.append(txt)
            if cm:
                # Keep the comments found between the class names
                txt = L_SEP.join(cm) + L_SEP
                news.append(txt)
            set_qstandardpaths(line.split('.Qt')[0])
        elif ls_line.startswith('from PyQt4.QtGui '):
            parts = line.split('import ')
            core, gui, wdg, pr, md, cm = self.sort_qtgui_classes(parts[1])
            if core:
                stcore = "".join([parts[0].replace('PyQt4.QtGui ',
                                  'PyQt5.QtCore import '), ', '.join(core)])
                txt = self.reindent_import_line(stcore)
                self._has_qtwidget_import = True
                news.append(txt)
            if gui:
                stgui = "".join([parts[0].replace('PyQt4', 'PyQt5'),
                                 'import ', ', '.join(gui)])
                txt = self.reindent_import_line(stgui)
                news.append(txt)
            if wdg:
                stwdg = "".join([parts[0].replace('PyQt4.QtGui ',
                                 'PyQt5.QtWidgets import '), ', '.join(wdg)])
                txt = self.reindent_import_line(stwdg)
                self._has_qtwidget_import = True
                news.append(txt)
            if pr:
                stpr = "".join([parts[0].replace('PyQt4.QtGui ',
                                'PyQt5.QtPrintSupport import '), ', '.join(pr)])
                txt = self.reindent_import_line(stpr)
                news.append(txt)
            if md:
                stmd = "".join([parts[0].replace('PyQt4.QtGui ',
                                'PyQt5.QtMultimedia import '), ', '.join(md)])
                txt = self.reindent_import_line(stmd)
                news.append(txt)
            if cm:
                txt = L_SEP.join(cm) + L_SEP
                news.append(txt)
            set_qstandardpaths(line.split('.QtGui')[0])
        elif ls_line.startswith('from PyQt4.QtWebKit '):
            parts = line.split('import ')
            wb, wdg = self.sort_qtwebkit_classes(parts[1])
            if wb:
                chain = "".join([parts[0].replace('PyQt4', 'PyQt5'),
                                 'import ', ', '.join(wb)])
                txt = self.reindent_import_line(chain)
                news.append(txt)
            if wdg:
                chain = "".join([parts[0].replace('PyQt4.QtWebKit',
                                 'PyQt5.QtWebKitWidgets'), 'import ', ', '.join(wdg)])
                txt = self.reindent_import_line(chain)
                news.append(txt)
        else:
            # Any other line (imports from other modules, plain code)
            line = line.replace('PyQt4', 'PyQt5')
            news.append(line)
        count += 1
    return news
def refactor_modules_import(self, line):
    """Apply the changes to a import line.
    Args:
        line -- the line
    Returns:
        str(new import line), or None when no module name remains
    """
    parts = line.split('import ')
    chain = parts[0].replace('PyQt4', 'PyQt5') + 'import '
    # Normalize a possibly parenthesized/continued module list
    end = parts[1].replace('(', '').replace(')', '').replace('\\', '')
    modules = set([name.strip() for name in end.split(',')
                   if name.strip()])
    if 'QtGui' in modules and not self.modified['QtGui']:
        # Nothing left in QtGui: its classes moved to other modules
        modules.remove('QtGui')
    if self.modified['QtCore']:
        modules.add('QtCore')
    if self.modified['QtWidgets']:
        modules.add('QtWidgets')
        self._has_qtwidget_import = True
    if 'QtWebKit' in modules and not self.modified['QtWebKit']:
        modules.remove('QtWebKit')
    if self.modified['QtWebKitWidgets']:
        modules.add('QtWebKitWidgets')
    if self.modified['QtMultimedia'] and not 'QtMultimedia' in modules:
        modules.add('QtMultimedia')
    if self.modified['QtPrintSupport']:
        modules.add('QtPrintSupport')
    if not modules:
        return None
    modules = list(modules)
    modules.sort()
    return chain + ', '.join(modules)
def sort_qtgui_classes(self, chain):
    """Sort the classes from a QtGui import line.
    Args:
        chain -- the classe's names in one line
    Returns:
        Six class lists:
        QtCore, QtGui, QtWidgets, QtPrintSupport, QtMultimedia, comments
    """
    core, gui, widgets = [], [], []
    printer, media, comments = [], [], []
    for raw in chain.split(L_SEP):
        # Strip list separators, continuations and parenthesis
        name = raw.strip(',').replace('\\', '')
        cls = name.replace('(', '').replace(')', '').strip()
        if not cls:
            continue
        if self.is_comment(cls):
            comments.append(cls)
        elif cls in CLASSES['QtCore']:
            core.append(cls)
        elif cls in CLASSES['QtWidgets']:
            widgets.append(cls)
        elif cls in CLASSES['QtMultimedia']:
            media.append(cls)
        elif cls in CLASSES['QtPrintSupport']:
            printer.append(cls)
        else:
            # Still a QtGui class, possibly under a new name
            if cls == 'QIconEngineV2':
                cls = 'QIconEngine'
            elif cls == 'QMatrix':
                cls = 'QTransform'
            gui.append(cls)
    return core, gui, widgets, printer, media, comments
def sort_qt_classes(self, chain):
    """
    Sort the classes from a qt import line
    Args:
        chain -- the classe's names in one line
    Returns:
        Seven class lists:
        QtCore, QtGui, QtWidgets, QtPrintSupport, QtMultimedia,
        QtOpenGL, comments
    """
    core, mixed_gui, widgets, printer, media, comments = self.sort_qtgui_classes(chain)
    # Split the QtOpenGL classes out of the QtGui ones
    gui, opengl = [], []
    for cls in mixed_gui:
        target = opengl if cls in CLASSES['QtOpenGL'] else gui
        target.append(cls)
    return core, gui, widgets, printer, media, opengl, comments
def sort_qtwebkit_classes(self, chain):
    """Sort the classes from a QtWebkit import line.
    Args:
        chain -- the classe's names in one line
    Returns:
        Two lists: QtWebkit and QtWebKitWidgets classes
    """
    olds, news = [], []
    for name in chain.split(','):
        cls = name.replace('\\', '').strip().replace('(', '').replace(')', '')
        if not cls:
            continue
        if cls in CLASSES['QtWebKitWidgets']:
            news.append(cls)
        else:
            olds.append(cls)
    return olds, news
def reindent_import_line(self, line):
    """Rewrite a long import line into a multiline.
    The lines have maximum 80 characters and the indentations are fixed at
    the column of the first open parenthesis of the first line.
    Args:
        line -- the original line
    Returns:
        Multiline
    """
    if len(line) < 81:
        # Short enough: keep it on one line
        return line + '\n'
    begin, end = line.split('import ')
    txt = begin + 'import ('
    cls = end.lstrip().split(',')
    lines = []
    # Align the continuation lines on the opening parenthesis
    indent = self.get_import_indent(len(txt)-1)
    for cl in cls:
        cl = cl.rstrip() + ','
        if len(txt) + len(cl) < 81:
            txt += cl
        else:
            # Current line is full: start a new indented one
            txt += '\n'
            lines.append(txt)
            txt = indent + cl
    # Replace the trailing comma with the closing parenthesis
    lines.append(txt[:-1] + ')')
    return "".join(lines) + '\n'
def get_import_indent(self, length):
    """Returns the indentation for a multiline import.
    Args:
        length -- the length of the string `from foo import`
    returns:
        str()
    """
    if self.indent == ' ':
        return ' ' * length
    # Assume a tab is equivalent of four spaces.  Integer division is
    # required: on Python 3 `length / 4` is a float and multiplying a
    # string by a float raises TypeError.
    return self.indent * (length // 4)
def clean_file(self, lines):
    """Replace the 'FIXME$' placeholders by 'FIXME' and collect the messages.

    Args:
        lines -- the list of source code lines
    Returns:
        tuple(lines, fixs) where fixs is the list of
        '<line number> <message>' strings for the report
    """
    fixs = []
    lineno = 1
    for i, line in enumerate(lines):
        if self.is_comment(line) and 'FIXME$' in line:
            lines[i] = line.replace('FIXME$', 'FIXME')
            # Cut everything up to and including the 'FIXME$' marker.
            # The previous `lstrip('# FIXME$')` stripped a *character
            # set* ('#', ' ', 'F', 'I', 'X', 'M', 'E', '$') and could
            # eat the first letters of the message itself.
            msg = line.split('FIXME$', 1)[1].lstrip()
            fixs.append('%6d %s' % (lineno, msg))
        # A line may hold several physical lines (multiline inserts)
        lineno += line.count('\n')
    return lines, fixs
def rcut(self, string, chars):
"""Remove the trailing characters from a string.
Args:
string -- the string
chars -- the sequence of characters
"""
if string.endswith(chars):
string = string[:-len(chars)]
return string
def convert_in_one_line(self, strings):
lines = strings.split('\n')
if len(lines) > 1:
return lines[0] + ''.join(l.lstrip() for l in lines[1:])
return strings
def save_changes(self, lines):
with open(self.dest, 'wb') as outf:
outf.write(''.join(lines).replace('\n', L_SEP).encode(self.tools.encoding))
mode = os.stat(self.source).st_mode
os.chmod(self.dest, mode)
def print_(self, msg):
sys.stdout.write('%s\n' % msg)
if self.log:
with open(self.log, 'a') as outf:
if PY_VERS < 3:
outf.write(('%s%s' % (msg, L_SEP)).encode(self.tools.encoding))
else:
outf.write('%s%s' % (msg, L_SEP))
class Tools(object):
    """I/O helpers: encoding detection and logical-line reading."""
    def __init__(self):
        # Default encoding; refined by get_encoding() on each file read.
        self.encoding = 'utf-8'
        # Last exception met while reading a file, for error reporting.
        self.last_error = ''
    def read_python_source(self, filename):
        """Return the source code.

        Args:
            filename -- the file name

        Returns:
            list(lines), or None if the encoding or content can't be read
        """
        self.encoding = self.get_encoding(filename)
        if self.encoding is None:
            return None
        return self.get_content(filename)
    def get_content(self, filename):
        """Read `filename` with the detected encoding.

        Returns:
            list of lines (split on newline), or None on error (the
            exception is kept in self.last_error)
        """
        if PY_VERS < 3:
            try:
                with open_(filename, "rU", encoding=self.encoding) as inf:
                    content = inf.read()
            except (IOError, UnicodeDecodeError) as why:
                self.last_error = why
                return None
        else:
            try:
                with open(filename, "r", encoding=self.encoding) as inf:
                    content = inf.read()
            except (IOError, UnicodeDecodeError) as why:
                self.last_error = why
                return None
        return content.split('\n')
    def get_encoding(self, path):
        """Return the encoding declared in the first two lines of `path`.

        Returns:
            the encoding name, or None if the file can't be read
        """
        lines = []
        try:
            with open(path, 'rb') as inf:
                # Only the first two lines may carry a coding declaration
                # (PEP 263).
                try:
                    lines.append(inf.readline())
                    lines.append(inf.readline())
                except:
                    pass
        except IOError as why:
            sys.stdout.write("Can't read the file `%s`\nReason: %s\n" % (path, why))
            return None
        return self.read_encoding(lines)
    def read_encoding(self, lines):
        """Deduce the file encoding from its two first raw lines.

        Handles an UTF-8 BOM and a PEP 263 coding comment.

        NOTE(review): `l1, l2 = lines` assumes exactly two elements, so a
        shorter list would raise ValueError before the emptiness check
        below — confirm get_encoding() is the only caller.
        """
        l1, l2 = lines
        coding = None
        bom = False
        default = 'utf-8'
        if not lines or lines == [b'', b'']:
            return default
        if l1.startswith(BOM_UTF8):
            # Strip the BOM and remember it for the consistency check in
            # find_comment().
            bom = True
            l1 = l1[3:]
            default = 'utf-8-sig'
        if not l1:
            return default
        coding = self.find_comment(l1, bom)
        if coding is not None:
            return coding
        coding = self.find_comment(l2, bom)
        if coding is not None:
            return coding
        return default
    def find_comment(self, chain, bom):
        """Look for a PEP 263 coding comment in one raw line.

        Args:
            chain -- the raw (bytes) line
            bom -- True if the file started with an UTF-8 BOM

        Returns:
            the normalized encoding name, or None
        """
        comment = re.compile(r"coding[:=]\s*([-\w.]+)")
        try:
            string = chain.decode('ascii')
        except UnicodeDecodeError:
            # A coding comment must be pure ascii.
            return None
        matches = comment.findall(string)
        if not matches:
            return None
        codings = ("latin-1", "iso-8859-1", "iso-latin-1")
        codings_with_dash = tuple(coding+'-' for coding in codings)
        # Normalize: lowercase, dashes, and keep only a 12-char prefix.
        enc = matches[0][:12].lower().replace("_", "-")
        if enc == "utf-8" or enc.startswith("utf-8-"):
            encoding = "utf-8"
        elif enc in codings or enc.startswith(codings_with_dash):
            encoding = "iso-8859-1"
        else:
            sys.stdout.write("Non-standard encoding: %s\n" % enc)
            encoding = enc
        try:
            codec = lookup(encoding)
        except LookupError:
            sys.stdout.write("Can't read the encoding: %s\n" % encoding)
            return None
        if bom:
            # A BOM is only consistent with utf-8.
            if codec.name != 'utf-8':
                sys.stdout.write("Inconsistant encoding: %s\n" % encoding)
                return None
            encoding += '-sig'
        return encoding
    def get_code_lines(self, filename):
        """Return the file content as a list of *logical* lines.

        Each element groups the physical lines belonging to one logical
        line (as delimited by get_num_physical_lines()).

        Returns:
            list of strings, [] for an empty file, None on read error,
            False on a tokenize error.
        """
        count = 0
        source = self.read_python_source(filename)
        if source is None:
            # error reading input file
            return None
        if not source[-1]:
            # Drop the empty element produced by a trailing newline.
            source.pop()
        if not source:
            #self.last_error = 'File is empty'
            #return None
            return []
        orig = ['%s\n' % l for l in source]
        lines = []
        gen = self.get_num_physical_lines(filename)
        while 1:
            try:
                num = next(gen)
                if not num:
                    # NOTE(review): tokenize failed; False is returned
                    # here while the other error path returns None.
                    return False
                lines.append(''.join(orig[count:num]))
                count = num
            except StopIteration:
                break
        return lines
    def get_num_physical_lines(self, filename):
        """Yield the line number where each logical line ends.

        The converter works with a list of logical lines, not physical
        lines.  Yields False if tokenization fails.

        Args:
            filename -- the file name

        Yields:
            int(lineno) (1-based), or False on error
        """
        if PY_VERS < 3:
            inf = open_(filename, "r", encoding=self.encoding)
            src = inf.readline
        else:
            inf = open(filename, "r", encoding=self.encoding)
            src = inf.readline
        # new: the previous token ended a logical line
        # com: the previous token was a one-line comment
        new = True
        com = False
        tokens = tokenize.generate_tokens(src)
        # tokens = (token type, token string, (srow, scol), (erow, ecol), line)
        try:
            for typ, _, _, end, ln in tokens:
                if typ == tokenize.ENDMARKER:
                    # End of file
                    yield end[0]
                elif typ == tokenize.NEWLINE:
                    # End of logical line
                    new = True
                    yield end[0]
                elif typ == tokenize.COMMENT and new:
                    # One line comment
                    com = True
                    new = True
                    yield end[0]
                elif typ == tokenize.NL:
                    # End of physical line
                    if com:
                        com = False
                        new = True
                    elif not ln.strip() and new:
                        # Empty line
                        new = True
                        yield end[0]
                    else:
                        new = False
                elif typ == tokenize.ERRORTOKEN:
                    # Error token
                    raise Exception('Error token encountered')
                else:
                    new = False
        except Exception as why:
            # NOTE(review): `end` and `ln` may be unbound if the very
            # first next(tokens) raises — confirm tokenize always yields
            # at least once before failing.
            sys.stdout.write('Except: %s\nLine: %s\n%s' %(why, end, ln))
            self.last_error = why
            yield False
        finally:
            inf.close()
class Main(object):
    """Command-line driver: parses arguments, copies the sources and runs
    the PyQt4 -> PyQt5 converter on each file."""
    def __init__(self, args):
        # Mapping {copied file path: original path}, used for diffs.
        self.copied = {}
        self.path = None
        self.nosubdir = False
        self.followlinks = False
        self.destdir = None
        # --diff value ('same_as', a path, or False).
        self.write_diff = False
        # --diffs flag: one diff file per converted file.
        self.write_diffs = False
        # Resolved diff file name, or 'destfile', or False.
        self.filename_diff = False
        self.nopyqt5 = False
        parser = argparse.ArgumentParser(description='Convert a source code '
                    'written for PyQt4 into a valid code for PyQt5')
        parser.add_argument("path",
                    help="Path of a file or a directory.\nThe file may be "
                    "a source code python or a text file wich contains the "
                    "names of the files to be converted separated by a new "
                    "line.")
        parser.add_argument("--nosubdir", action="store_true",
                    help="Don't process into sub-directories."
                    " Default: False")
        parser.add_argument("--followlinks", action="store_true",
                    help="Visit directories pointed to by symlinks."
                    " Default: False")
        parser.add_argument("-o", nargs=1, help="The name of the generated "
                    "file or directory if path is a directory."
                    " Default: path_PyQt5 (path_PyQt4 if --nopyqt5)")
        parser.add_argument("--diff", nargs='?', const='same_as',
                    help="Write a diff file. If there's more than one file "
                    "converted, all the diff are written into one file. "
                    "If no name is provided, the diff file will be named "
                    "with the name of the source."
                    " Default: False")
        parser.add_argument("--diffs", action="store_true",
                    help="Write a diff file for each file converted."
                    "The diff files will be created in the same destination "
                    "dir as the converted files"
                    " Default: False")
        parser.add_argument("--nolog", action="store_true",
                    help="Do not create a log file."
                    " Default: False")
        parser.add_argument("--nopyqt5", action="store_true",
                    help="Only perform updates that are compatable with PyQt4."
                    " Default: False")
        arg = parser.parse_args()
        if arg.path:
            self.path = self.check_path(arg.path)
            if not self.path:
                sys.exit()
        if arg.nosubdir:
            self.nosubdir = True
        if arg.followlinks:
            self.followlinks = True
        if arg.diff:
            self.write_diff = arg.diff
        if arg.diffs:
            self.write_diffs = True
        if arg.nopyqt5:
            self.nopyqt5 = True
        if arg.o:
            self.destdir = self.check_path(arg.o[0], True)
            if not self.destdir:
                sys.exit()
        else:
            # No -o: prepare_changes() derives the destination from path.
            self.destdir = self.path
        if arg.nolog:
            self.log = None
        else:
            self.log = 'pyqt4_to_pyqt4.log' if self.nopyqt5 else 'pyqt4_to_pyqt5.log'
        date = datetime.now().strftime("%A %d. %B %Y %H:%M")
        self.print_('** %s %s **\nArgs: %s\n' % (self.log, date, sys.argv))
        self.prepare_changes(self.followlinks)
    def is_python_file(self, path):
        """Checks if the given path is a Python file or not.

        Args:
            path -- path to file

        Returns:
            bool -- True if the path is a Python file and False otherwise
        """
        # check if file is a regular file
        mode = os.stat(path).st_mode
        if not stat.S_ISREG(mode):
            return False
        # check if file has a Python extension
        ext = os.path.splitext(path)[1]
        if ext in PYEXT:
            return True
        # check if file is executable and contains a Python shebang
        if mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
            with open(path, 'r') as inf:
                line = inf.readline().strip()
            if line in PYSHEBANG:
                return True
        return False
    def prepare_changes(self, followlinks=False):
        """Dispatch on the kind of `path`: directory, list of files, or a
        single python file; set up destination and diff options."""
        ver = "PyQt4" if self.nopyqt5 else "PyQt5"
        if os.path.isdir(self.path):
            if self.destdir == self.path:
                self.destdir = self.path + "_" + ver
            self.copy_dir(self.destdir, self.path, followlinks=followlinks)
            self.set_diff_option('dir')
            self.process_from_dir(self.destdir, followlinks=followlinks)
        elif os.path.isfile(self.path):
            if not self.is_python_file(self.path):
                # Assume this is a list of files
                files, subdirs = self.read_filenames(self.path)
                if self.destdir == self.path:
                    self.destdir = "__" + ver + "__"
                self.copy_files(self.destdir, subdirs, files)
                self.set_diff_option('dir')
                self.process_from_dir(self.destdir, followlinks=followlinks)
            else:
                if self.destdir == self.path:
                    f, e = os.path.splitext(self.path)
                    self.destdir = "".join([f, "_"+ver, e])
                if self.write_diff:
                    self.set_diff_option('file')
                cnv = PyQt4ToPyQt5(self.path, self.destdir, self.log, self.nopyqt5)
                cnv.setup()
                self.write_diff_file(self.destdir, self.path)
    def process_from_dir(self, fld, followlinks=False):
        """Convert in place every file found under `fld` (already a copy)."""
        self.print_('Beginning into: %s\n' % fld)
        for root, _, files in os.walk(fld, followlinks=followlinks):
            files.sort()
            for f in files:
                fname = os.path.join(root, f)
                cnv = PyQt4ToPyQt5(fname, fname, self.log, self.nopyqt5)
                cnv.setup()
                self.write_diff_file(fname)
    def copy_dir(self, dest, orig, followlinks=False):
        """Copy the python files of `orig` into the new tree `dest`,
        recording the mapping in self.copied."""
        self.copied = {}
        try:
            os.makedirs(dest)
        except Exception as why:
            sys.stdout.write("Can't create the dir: `%s`\nReason: %s\n" % (dest, why))
            sys.exit()
        if self.nosubdir:
            files = glob.glob(os.path.join(orig, '*.py'))
            for f in files:
                shutil.copy(f, dest)
                # NOTE(review): keyed by `dest` (the directory), so each
                # iteration overwrites the previous entry; a per-file key
                # like os.path.join(dest, basename) looks intended —
                # confirm against write_diff_file().
                self.copied[dest] = f
            return
        for root, dirs, files in os.walk(orig, followlinks=followlinks):
            dirs[:] = [d for d in dirs if d not in ('__pycache__', '.git')]
            target = root.replace(orig, dest)
            for name in dirs:
                os.makedirs(os.path.join(target, name))
            for name in files:
                src = os.path.join(root, name)
                if self.is_python_file(src):
                    cp = os.path.join(target, name)
                    shutil.copy(src, cp)
                    self.copied[cp] = src
    def read_filenames(self, path):
        """Read a text file containing one file name per line.

        Returns:
            (sorted file names, sorted list of their parent dirs)
        """
        try:
            with open(path, 'r') as inf:
                files = [f.strip() for f in inf.readlines()]
        except IOError as why:
            sys.stdout.write("Can't read the file: `%s`\nReason: %s\n" % (path, why))
            sys.exit()
        files.sort()
        dirs = set([])
        for f in files:
            d = os.path.dirname(f)
            if d:
                dirs.add(d)
        dirs = list(dirs)
        dirs.sort()
        return files, dirs
    def copy_files(self, dest, dirs, files):
        """Copy the listed files flat into `dest`, recording the mapping.

        NOTE(review): `dirs` is accepted but never used here.
        """
        self.copied = {}
        if not os.path.exists(dest):
            try:
                os.makedirs(dest)
            except Exception as why:
                sys.stdout.write("Can't create the dir: `%s`\nReason: %s\n" % (dest, why))
                sys.exit()
        for f in files:
            if not os.path.isfile(f):
                sys.stdout.write('File `%s` not found, ignored\n' % f)
                continue
            cp = os.path.join(dest, os.path.basename(f))
            shutil.copy(f, cp)
            self.copied[cp] = f
    def check_path(self, path, writable=False):
        """Normalize `path` to absolute and validate it.

        Args:
            path -- the path to check
            writable -- when True, only require the parent dir to be
                writable instead of requiring existence

        Returns:
            the absolute path, or False
        """
        if not os.path.isabs(path):
            path = os.path.normpath(os.path.join(os.getcwd(), path))
        if not writable:
            if not os.path.exists(path):
                sys.stdout.write('No such file or directory: `%s`\n' % path)
                return False
            return path
        parent = os.path.dirname(path)
        if not os.access(parent, os.W_OK):
            sys.stdout.write('Destination dir `%s` is read only\n' % parent)
            return False
        return path
    def set_diff_option(self, opt):
        """Sets the diff file option.

        Resolves self.filename_diff from --diff/--diffs and `opt`
        ('file' for a single conversion, 'dir' for a batch).
        """
        if not any((self.write_diff, self.write_diffs)):
            return
        if opt == 'file':
            # Convert just one file
            if self.write_diff == 'same_as':
                # Using same name as the file converted
                self.filename_diff = 'destfile'
            elif os.path.isdir(self.write_diff):
                if not self.check_path(self.write_diff, True):
                    sys.stdout.write('Dir `%s` is read only\n' % self.write_diff)
                    self.write_diff = False
                    return
                # Using provided dir path + the name of the file converted
                fname = os.path.splitext(os.path.basename(self.destdir))[0] + '.diff'
                self.filename_diff = os.path.join(self.write_diff, fname)
            else:
                # Using diff file name provided
                self.filename_diff = self.write_diff
        elif opt == 'dir':
            # Convert several files in dir(s)
            if self.write_diffs:
                # One diff file for each file converted
                self.filename_diff = 'destfile'
            elif self.write_diff == 'same_as':
                # Using one file diff into the destination dir
                self.filename_diff = os.path.join(self.destdir, 'DIFFs.diff')
            else:
                if os.path.isdir(self.write_diff):
                    if not self.check_path(self.write_diff, True):
                        sys.stdout.write('Dir `%s` is read only\n' % self.write_diff)
                        self.write_diff = False
                        return
                    # Using provided dir path + DIFFs.diff
                    self.filename_diff = os.path.join(self.write_diff, 'DIFFs.diff')
                else:
                    # Using provided file path
                    self.filename_diff = self.write_diff
    def write_diff_file(self, dest, orig=None):
        """Append the unified diff of (orig, dest) to the diff file.

        Args:
            dest -- the converted file
            orig -- the original file; looked up in self.copied when None
        """
        if not self.filename_diff:
            return
        if self.filename_diff == 'destfile':
            diffname = os.path.splitext(dest)[0] + '.diff'
            # NOTE(review): this logs the literal 'destfile' instead of
            # `diffname` — confirm the intended message.
            self.print_('Write diff file: `%s`' % self.filename_diff)
        else:
            diffname = self.filename_diff
        if orig is None:
            orig = self.copied[dest]
        cmd = ['diff', '-u', orig, dest]
        with open(diffname, 'a') as outf:
            reply = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            outf.write(str(reply.communicate()[0]))
    def print_(self, msg):
        """Append `msg` to the log file, if logging is enabled."""
        if self.log:
            with open(self.log, 'a') as outf:
                outf.write('%s\n' % msg)
def cli():
    """Command-line entry point: build the driver, which runs everything."""
    Main(sys.argv)
|
rferrazz/pyqt4topyqt5
|
pyqt4topyqt5/__init__.py
|
Python
|
lgpl-3.0
| 97,910
|
[
"VisIt"
] |
be0b112871bd5bda97ae95a49545305cacc65a6aae38685a06e70543dcb46a50
|
import re
import os, sys
import numpy as np
import pandas as pd
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
def add_graphics_theme_to_nx_graph(
        nx_graph,
        edge_color=None,
        node_size_factor=50,
        edge_size_factor=500):
    """Attach Cytoscape-style graphics attributes to nodes and edges.

    Args:
        nx_graph: a networkx multigraph (edges indexed per node pair).
        edge_color: optional fill color applied to every edge.
        node_size_factor: currently unused; node size is fixed at 100.
        edge_size_factor: currently unused; edge width is fixed at 1.0.

    Returns:
        None; the graph is modified in place.
    """
    # nodes: fixed-size white ellipses with a black outline
    for node_name, node_attrs in nx_graph.nodes(data=True):
        node_size = 100
        graphics = {
            "type": "ellipse",
            "w": node_size,
            "h": node_size,
            "fill": "#FFFFFF",
            "outline": "#000000",
            "width": 1.0,
            "fontSize": 14
        }
        if nx_graph.nodes[node_name].get("graphics") is not None:
            nx_graph.nodes[node_name]["graphics"].update(graphics)
        else:
            nx_graph.nodes[node_name]["graphics"] = graphics
    # edges: thin arcs with a delta arrow head
    for start_node, end_node in nx_graph.edges():
        # FIX: was `xrange`, which does not exist on Python 3 (the rest
        # of this file uses Python 3 idioms such as `range` and
        # DataFrame.notna()).
        for edge_idx in range(len(nx_graph[start_node][end_node])):
            edge_width = 1.0
            graphics = {
                "type": "arc",
                "width": edge_width,
                "targetArrow": "delta"
            }
            if edge_color is not None:
                graphics["fill"] = edge_color
            edge_attrs = nx_graph[start_node][end_node][edge_idx]
            if edge_attrs.get("graphics") is not None:
                edge_attrs["graphics"].update(graphics)
            else:
                edge_attrs["graphics"] = graphics
    return None
def stringize_nx_graph(nx_graph):
    """Prepare a graph for GML output by stringifying collection attributes.

    Graph, node and edge attributes that are lists/sets/arrays become
    comma-joined strings, and node names get a cleaned `name` attribute
    for nice display in Cytoscape.

    Returns:
        the same graph object, modified in place.
    """
    # graph attributes
    for key in nx_graph.graph.keys():
        if isinstance(nx_graph.graph[key], (list, set, np.ndarray)):
            nx_graph.graph[key] = ",".join([
                str(val) for val in list(nx_graph.graph[key])])
    # node attributes
    for node_name, node_attrs in nx_graph.nodes(data=True):
        for key in node_attrs.keys():
            if isinstance(nx_graph.nodes[node_name][key], (list, set, np.ndarray)):
                nx_graph.nodes[node_name][key] = ",".join([
                    str(val) for val in nx_graph.nodes[node_name][key]])
        # adjust node name for nice output in cytoscape
        # (the `.` is an unescaped regex wildcard, kept as-is)
        new_node_name = re.sub(r"HCLUST.\d+_", "", node_name)
        new_node_name = new_node_name.replace(".UNK.0.A", "")
        nx_graph.nodes[node_name]["name"] = new_node_name
    # edge attributes
    for start_node, end_node in nx_graph.edges():
        # FIX: was `xrange`, a NameError on Python 3
        for edge_idx in range(len(nx_graph[start_node][end_node])):
            edge_attrs = nx_graph[start_node][end_node][edge_idx]
            for key in edge_attrs.keys():
                if isinstance(edge_attrs[key], (list, set, np.ndarray)):
                    nx_graph[start_node][end_node][edge_idx][key] = ",".join([
                        str(val) for val in nx_graph[start_node][end_node][edge_idx][key]])
    return nx_graph
def main():
    """Build a network view of motif interactions and write it as GML.

    Command line:
        argv[1] -- summary table (tsv) with pwm1/pwm2/interaction columns
        argv[2] -- pwm-to-tf mapping table (tsv)
        argv[3] -- expressed/dynamic tf table (tsv, indexed by ensembl id)
    """
    # files
    summary_file = sys.argv[1]
    pwms_to_tfs_file = sys.argv[2]
    expressed_tfs_file = sys.argv[3]  # TODO
    # read in data
    summary = pd.read_csv(summary_file, sep="\t")
    pwms_to_tfs = pd.read_csv(pwms_to_tfs_file, sep="\t")
    pwms_to_tfs = pwms_to_tfs[pwms_to_tfs["expressed"].notna()]
    # dict: key - pwm names, vals - dict of ensembl ids to hgnc ids
    pwms_to_filt_tfs = {}
    for line_idx in range(pwms_to_tfs.shape[0]):
        pwm_info = pwms_to_tfs.iloc[line_idx, :]
        pwm_name = pwm_info["hclust_model_name"]
        pwm_to_tf = dict(zip(
            pwm_info["expressed"].split(";"),
            pwm_info["expressed_hgnc"].split(";")))
        pwms_to_filt_tfs[pwm_name] = pwm_to_tf
    # filter expressed hgncs for dynamic ones only
    tfs_filt = pd.read_csv(expressed_tfs_file, sep="\t", index_col=0)
    # FIX: iterate over snapshots — deleting from a dict while iterating
    # its live keys() view raises RuntimeError on Python 3.
    for pwm_name in list(pwms_to_filt_tfs.keys()):
        tfs_tmp = pwms_to_filt_tfs[pwm_name]
        for ensembl_tf in list(tfs_tmp.keys()):
            if ensembl_tf not in tfs_filt.index:
                del tfs_tmp[ensembl_tf]
        if len(tfs_tmp.keys()) == 0:
            del pwms_to_filt_tfs[pwm_name]
        # NOTE(review): this re-add makes the `del` above a no-op; an
        # `else` was probably intended, but downstream lookups expect
        # every pwm key to exist, so the original net behavior is kept.
        pwms_to_filt_tfs[pwm_name] = tfs_tmp
    # add in tfs column: "HGNC;max_timepoint_idx;..." per pwm
    tf1 = []
    for pwm in summary["pwm1"]:
        tf_str = []
        for ensembl_id in pwms_to_filt_tfs[pwm]:
            tf_str.append(pwms_to_filt_tfs[pwm][ensembl_id])
            # annotate with the timepoint of max expression
            expression = tfs_filt.loc[ensembl_id, :]
            max_idx = np.argmax(expression.values)
            tf_str.append(str(max_idx))
        tf1.append((";").join(tf_str))
    summary["tf1"] = tf1
    tf2 = []
    for pwm in summary["pwm2"]:
        tf_str = []
        for ensembl_id in pwms_to_filt_tfs[pwm]:
            tf_str.append(pwms_to_filt_tfs[pwm][ensembl_id])
            expression = tfs_filt.loc[ensembl_id, :]
            max_idx = np.argmax(expression.values)
            tf_str.append(str(max_idx))
        tf2.append((";").join(tf_str))
    summary["tf2"] = tf2
    # remove failed rules
    summary = summary[~summary["interaction"].str.contains("FAILED")]
    # make graph
    graph = nx.from_pandas_edgelist(summary, "tf1", "tf2")
    # set up positions
    pos = graphviz_layout(graph, prog="neato")
    scale_factor = 3
    for key in pos.keys():
        coords = pos[key]
        pos[key] = {"x": scale_factor * coords[0], "y": -scale_factor * coords[1]}
    # note this is diff from v1 to v2 in networkx
    nx.set_node_attributes(graph, pos, "graphics")
    # add graphics
    add_graphics_theme_to_nx_graph(graph)
    # write gml
    out_file = "summary.gml"
    nx.write_gml(stringize_nx_graph(graph), out_file, stringizer=str)
    return


if __name__ == "__main__":
    # FIX: guard the entry point so importing this module has no side
    # effects (the bare `main()` call ran on import).
    main()
|
vervacity/ggr-project
|
scripts/analyze.mpra_summ_w_tfs.py
|
Python
|
mit
| 6,736
|
[
"Cytoscape"
] |
b09f9c881d27bab9206755adae4abcf052fbeb5e28b3c336485fcafa7bf109b2
|
"""
Core visualization operations based on Mayavi.
Actual implementation of _Renderer and _Projection classes.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Mark Wronkiewicz <wronk.mark@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
from contextlib import contextmanager
import warnings
import numpy as np
from mayavi.core.scene import Scene
from mayavi.core.ui.mayavi_scene import MayaviScene
from tvtk.pyface.tvtk_scene import TVTKScene
from .base_renderer import _BaseRenderer
from ._utils import _check_color, _alpha_blend_background, ALLOWED_QUIVER_MODES
from ...surface import _normalize_vectors
from ...utils import (_import_mlab, _validate_type, SilenceStdout,
copy_base_doc_to_subclass_doc, _check_option)
class _Projection(object):
"""Class storing projection information.
Attributes
----------
xy : array
Result of 2d projection of 3d data.
pts : Source
Mayavi source handle.
"""
def __init__(self, xy=None, pts=None):
"""Store input projection information into attributes."""
self.xy = xy
self.pts = pts
def visible(self, state):
"""Modify visibility attribute of the source."""
if self.pts is not None:
self.pts.visible = state
@copy_base_doc_to_subclass_doc
class _Renderer(_BaseRenderer):
    """Class managing rendering scene.

    Attributes
    ----------
    mlab: mayavi.mlab
        Main Mayavi access point.
    fig: mlab.Figure
        Mayavi scene handle.
    """

    def __init__(self, fig=None, size=(600, 600), bgcolor='black',
                 name=None, show=False, shape=(1, 1), smooth_shading=True):
        # NOTE: `smooth_shading` is accepted for API compatibility but is
        # not used by this backend.
        if bgcolor is not None:
            bgcolor = _check_color(bgcolor)
        self.mlab = _import_mlab()
        self.shape = shape
        if fig is None:
            # New figure named after `name`.
            self.fig = _mlab_figure(figure=name, bgcolor=bgcolor, size=size)
        elif isinstance(fig, int):
            # Reuse / create the figure with this numeric id.
            self.fig = _mlab_figure(figure=fig, bgcolor=bgcolor, size=size)
        else:
            self.fig = fig
        self.fig._window_size = size
        _toggle_mlab_render(self.fig, show)

    @property
    def figure(self):  # cross-compat w/PyVista
        return self.fig

    def subplot(self, x, y):
        # Subplots are not supported by the Mayavi backend.
        pass

    def scene(self):
        """Return the scene handle (the Mayavi figure)."""
        return self.fig

    def set_interaction(self, interaction):
        """Set the interactor style, e.g. 'trackball' or 'terrain'."""
        from tvtk.api import tvtk
        if self.fig.scene is not None:
            self.fig.scene.interactor.interactor_style = \
                getattr(tvtk, f'InteractorStyle{interaction.capitalize()}')()

    def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
             backface_culling=False, scalars=None, colormap=None,
             vmin=None, vmax=None, interpolate_before_map=True,
             representation='surface', line_width=1., normals=None,
             polygon_offset=None, **kwargs):
        """Add a triangular mesh to the scene; returns the surface."""
        # normals and pickable are unused
        kwargs.pop('pickable', None)
        del normals
        if color is not None:
            color = _check_color(color)
        if color is not None and isinstance(color, np.ndarray) \
                and color.ndim > 1:
            # Per-vertex colors: build an RGBA table in 0-255.
            if color.shape[1] == 3:
                vertex_color = np.c_[color, np.ones(len(color))] * 255.0
            else:
                vertex_color = color * 255.0
            # create a lookup table to enable one color per vertex
            scalars = np.arange(len(color))
            color = None
        else:
            vertex_color = None
        with warnings.catch_warnings(record=True):  # traits
            surface = self.mlab.triangular_mesh(x, y, z, triangles,
                                                color=color,
                                                scalars=scalars,
                                                opacity=opacity,
                                                figure=self.fig,
                                                vmin=vmin,
                                                vmax=vmax,
                                                representation=representation,
                                                line_width=line_width,
                                                **kwargs)
            l_m = surface.module_manager.scalar_lut_manager
            if vertex_color is not None:
                l_m.lut.table = vertex_color
            elif isinstance(colormap, np.ndarray):
                # Raw LUT: uint8 is used directly, float64 is converted.
                if colormap.dtype == np.uint8:
                    l_m.lut.table = colormap
                elif colormap.dtype == np.float64:
                    l_m.load_lut_from_list(colormap)
                else:
                    raise TypeError('Expected type for colormap values are'
                                    ' np.float64 or np.uint8: '
                                    '{} was given'.format(colormap.dtype))
            elif colormap is not None:
                from matplotlib.cm import get_cmap
                l_m.load_lut_from_list(
                    get_cmap(colormap)(np.linspace(0, 1, 256)))
            else:
                assert color is not None
            surface.actor.property.shading = shading
            surface.actor.property.backface_culling = backface_culling
        return surface

    def contour(self, surface, scalars, contours, width=1.0, opacity=1.0,
                vmin=None, vmax=None, colormap=None,
                normalized_colormap=False, kind='line', color=None):
        """Draw contour lines on a surface; returns the contour module."""
        mesh = _create_mesh_surf(surface, self.fig, scalars=scalars)
        with warnings.catch_warnings(record=True):  # traits
            cont = self.mlab.pipeline.contour_surface(
                mesh, contours=contours, line_width=width, vmin=vmin,
                vmax=vmax, opacity=opacity, figure=self.fig)
            # NOTE(review): `colormap` is assigned unconditionally here,
            # even when None — confirm callers always pass a LUT table.
            cont.module_manager.scalar_lut_manager.lut.table = colormap
        return cont

    def surface(self, surface, color=None, opacity=1.0,
                vmin=None, vmax=None, colormap=None,
                normalized_colormap=False, scalars=None,
                backface_culling=False, polygon_offset=None):
        """Render an MNE surf dict as a solid surface."""
        if color is not None:
            color = _check_color(color)
        if normalized_colormap:
            # Scale a 0-1 colormap to the 0-255 LUT range.
            colormap = colormap * 255.0
        # Make a solid surface
        mesh = _create_mesh_surf(surface, self.fig, scalars=scalars)
        with warnings.catch_warnings(record=True):  # traits
            surface = self.mlab.pipeline.surface(
                mesh, color=color, opacity=opacity, vmin=vmin, vmax=vmax,
                figure=self.fig)
            if colormap is not None:
                surface.module_manager.scalar_lut_manager.lut.table = colormap
            surface.actor.property.backface_culling = backface_culling

    def sphere(self, center, color, scale, opacity=1.0,
               resolution=8, backface_culling=False,
               radius=None):
        """Plot spheres at `center` (N x 3); `radius` is unused here."""
        color = _check_color(color)
        center = np.atleast_2d(center)
        x, y, z = center.T
        surface = self.mlab.points3d(x, y, z, color=color,
                                     resolution=resolution,
                                     scale_factor=scale, opacity=opacity,
                                     figure=self.fig)
        surface.actor.property.backface_culling = backface_culling

    def tube(self, origin, destination, radius=0.001, color='white',
             scalars=None, vmin=None, vmax=None, colormap='RdBu',
             normalized_colormap=False, reverse_lut=False):
        """Draw tubes from `origin` to `destination` points.

        Returns the last surface created.
        """
        color = _check_color(color)
        origin = np.atleast_2d(origin)
        destination = np.atleast_2d(destination)
        if scalars is None:
            # TODO: iterating over each tube rather than plotting in
            # one call may be slow.
            # See https://github.com/mne-tools/mne-python/issues/7644
            for idx in range(origin.shape[0]):
                surface = self.mlab.plot3d([origin[idx, 0],
                                            destination[idx, 0]],
                                           [origin[idx, 1],
                                            destination[idx, 1]],
                                           [origin[idx, 2],
                                            destination[idx, 2]],
                                           tube_radius=radius,
                                           color=color,
                                           figure=self.fig)
        else:
            for idx in range(origin.shape[0]):
                surface = self.mlab.plot3d([origin[idx, 0],
                                            destination[idx, 0]],
                                           [origin[idx, 1],
                                            destination[idx, 1]],
                                           [origin[idx, 2],
                                            destination[idx, 2]],
                                           [scalars[idx, 0],
                                            scalars[idx, 1]],
                                           tube_radius=radius,
                                           vmin=vmin,
                                           vmax=vmax,
                                           colormap=colormap,
                                           figure=self.fig)
        # NOTE(review): only the last tube's LUT is reversed — confirm
        # this is acceptable for multi-tube calls.
        surface.module_manager.scalar_lut_manager.reverse_lut = reverse_lut
        return surface

    def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
                 glyph_height=None, glyph_center=None, glyph_resolution=None,
                 opacity=1.0, scale_mode='none', scalars=None,
                 backface_culling=False, colormap=None, vmin=None, vmax=None,
                 line_width=2., name=None, solid_transform=None):
        """Plot a vector field with glyphs chosen by `mode`."""
        _check_option('mode', mode, ALLOWED_QUIVER_MODES)
        color = _check_color(color)
        with warnings.catch_warnings(record=True):  # traits
            if mode in ('arrow', '2darrow'):
                self.mlab.quiver3d(x, y, z, u, v, w, mode=mode,
                                   color=color, scale_factor=scale,
                                   scale_mode=scale_mode,
                                   resolution=resolution, scalars=scalars,
                                   opacity=opacity, figure=self.fig)
            elif mode in ('cone', 'sphere', 'oct'):
                # 'oct' glyphs are built from a sphere source.
                use_mode = 'sphere' if mode == 'oct' else mode
                quiv = self.mlab.quiver3d(x, y, z, u, v, w, color=color,
                                          mode=use_mode, scale_factor=scale,
                                          opacity=opacity, figure=self.fig)
                if mode == 'sphere':
                    quiv.glyph.glyph_source.glyph_source.center = 0., 0., 0.
                elif mode == 'oct':
                    _oct_glyph(quiv.glyph.glyph_source, solid_transform)
            else:
                assert mode == 'cylinder', mode  # should be guaranteed above
                quiv = self.mlab.quiver3d(x, y, z, u, v, w, mode=mode,
                                          color=color, scale_factor=scale,
                                          opacity=opacity, figure=self.fig)
                if glyph_height is not None:
                    quiv.glyph.glyph_source.glyph_source.height = glyph_height
                if glyph_center is not None:
                    quiv.glyph.glyph_source.glyph_source.center = glyph_center
                if glyph_resolution is not None:
                    quiv.glyph.glyph_source.glyph_source.resolution = \
                        glyph_resolution
                quiv.actor.property.backface_culling = backface_culling

    def text2d(self, x_window, y_window, text, size=14, color='white',
               justification=None):
        """Add 2d text at window coordinates (0-1 range)."""
        if color is not None:
            color = _check_color(color)
        size = 14 if size is None else size
        with warnings.catch_warnings(record=True):  # traits
            text = self.mlab.text(x_window, y_window, text, color=color,
                                  figure=self.fig)
            text.property.font_size = size
            text.actor.text_scale_mode = 'viewport'
            if isinstance(justification, str):
                text.property.justification = justification

    def text3d(self, x, y, z, text, scale, color='white'):
        """Add text anchored at a 3d position."""
        color = _check_color(color)
        with warnings.catch_warnings(record=True):  # traits
            self.mlab.text3d(x, y, z, text, scale=scale, color=color,
                             figure=self.fig)

    def scalarbar(self, source, color="white", title=None, n_labels=4,
                  bgcolor=None):
        """Add a scalar bar for `source`, optionally alpha-blended on
        `bgcolor`."""
        with warnings.catch_warnings(record=True):  # traits
            bar = self.mlab.scalarbar(source, title=title, nb_labels=n_labels)
        if color is not None:
            bar.label_text_property.color = _check_color(color)
        if bgcolor is not None:
            from tvtk.api import tvtk
            bgcolor = np.asarray(bgcolor)
            bgcolor = np.append(bgcolor, 1.0) * 255.
            cmap = source.module_manager.scalar_lut_manager
            lut = cmap.lut
            ctable = lut.table.to_array()
            # Blend the LUT against the background so translucent entries
            # render correctly on the bar.
            cbar_lut = tvtk.LookupTable()
            cbar_lut.deep_copy(lut)
            vals = _alpha_blend_background(ctable, bgcolor)
            cbar_lut.table.from_array(vals)
            cmap.scalar_bar.lookup_table = cbar_lut

    def show(self):
        """(Re-)enable rendering of the scene."""
        if self.fig is not None:
            _toggle_mlab_render(self.fig, True)

    def close(self):
        """Close the underlying figure."""
        _close_3d_figure(figure=self.fig)

    def set_camera(self, azimuth=None, elevation=None, distance=None,
                   focalpoint=None, roll=None, reset_camera=None):
        """Position the camera; `reset_camera` is ignored by this backend."""
        _set_3d_view(figure=self.fig, azimuth=azimuth,
                     elevation=elevation, distance=distance,
                     focalpoint=focalpoint, roll=roll)

    def reset_camera(self):
        """Reset the camera to frame the current scene contents."""
        renderer = getattr(self.fig.scene, 'renderer', None)
        if renderer is not None:
            renderer.reset_camera()

    def screenshot(self, mode='rgb', filename=None):
        """Grab the scene as an image array (and optionally save it)."""
        return _take_3d_screenshot(figure=self.fig, mode=mode,
                                   filename=filename)

    def project(self, xyz, ch_names):
        """Project 3d points to 2d and map them to channel names."""
        xy = _3d_to_2d(self.fig, xyz)
        xy = dict(zip(ch_names, xy))
        # NOTE(review): assumes the last child of the figure is the
        # source of the projected points — confirm the call order.
        pts = self.fig.children[-1]
        return _Projection(xy=xy, pts=pts)

    def enable_depth_peeling(self):
        """Turn on depth peeling for correct translucency rendering."""
        if self.fig.scene is not None:
            self.fig.scene.renderer.use_depth_peeling = True

    def remove_mesh(self, surface):
        """Remove a previously-added surface's actor from the scene."""
        if self.fig.scene is not None:
            self.fig.scene.renderer.remove_actor(surface.actor)
def _mlab_figure(**kwargs):
    """Create a Mayavi figure using our defaults."""
    from .._3d import _get_3d_option
    fig = _import_mlab().figure(**kwargs)
    # If using modern VTK/Mayavi, improve rendering with FXAA
    if _get_3d_option('antialias'):
        renderer = getattr(fig.scene, 'renderer', None)
        if hasattr(renderer, 'use_fxaa'):
            fig.scene.renderer.use_fxaa = True
    return fig
def _toggle_mlab_render(fig, render):
    """Enable or disable scene rendering, except under the test backend."""
    mlab = _import_mlab()
    if mlab.options.backend == 'test':
        return
    fig.scene.disable_render = not render
def _create_mesh_surf(surf, fig=None, scalars=None, vtk_normals=True):
    """Build a Mayavi mesh source from an MNE surf dict (`rr`/`tris`/`nn`)."""
    mlab = _import_mlab()
    x, y, z = surf['rr'].T
    with warnings.catch_warnings(record=True):  # traits
        mesh = mlab.pipeline.triangular_mesh_source(
            x, y, z, surf['tris'], scalars=scalars, figure=fig)
        if vtk_normals:
            # Let VTK compute smooth point normals.
            mesh = mlab.pipeline.poly_data_normals(mesh)
            filt = mesh.filter
            filt.compute_cell_normals = False
            filt.consistency = False
            filt.non_manifold_traversal = False
            filt.splitting = False
        else:
            # make absolutely sure these are normalized for Mayavi
            nn = surf['nn'].copy()
            _normalize_vectors(nn)
            mesh.data.point_data.normals = nn
            mesh.data.cell_data.normals = None
    return mesh
def _3d_to_2d(fig, xyz):
    """Convert 3d points to a 2d perspective using a Mayavi Scene."""
    _validate_type(fig, Scene, "fig", "Scene")
    # Promote to homogeneous coordinates.
    homog = np.column_stack([xyz, np.ones(xyz.shape[0])])
    # World -> 'unnormalized' view coordinates.
    view = np.dot(_get_world_to_view_matrix(fig.scene), homog.T).T
    # Perspective divide by the fourth component.
    view = view / (view[:, 3].reshape(-1, 1))
    # Normalized view -> display coordinates.
    disp = np.dot(_get_view_to_display_matrix(fig.scene), view.T).T
    # Only the first two columns are meaningful for 2d plotting.
    return disp[:, :2]
def _get_world_to_view_matrix(scene):
    """Return the 4x4 matrix to transform xyz space to the current view.

    This is a concatenation of the model view and perspective transforms.
    """
    _validate_type(scene, (MayaviScene, TVTKScene), "scene",
                   "TVTKScene/MayaviScene")
    cam = scene.camera
    # VTK needs the aspect ratio and the near/far clipping planes to
    # build the composite transform.
    width, height = tuple(scene.get_size())
    clip_range = cam.clipping_range
    aspect = float(width) / height
    vtk_mat = cam.get_composite_projection_transform_matrix(
        aspect, clip_range[0], clip_range[1])
    return vtk_mat.to_array()
def _get_view_to_display_matrix(scene):
    """Return the 4x4 matrix to convert view coordinates to display coords.

    It's assumed that the view should take up the entire window and that
    the origin of the window is in the upper left corner.
    """
    _validate_type(scene, (MayaviScene, TVTKScene), "scene",
                   "TVTKScene/MayaviScene")
    # Normalized view coordinates place the origin at the center of the
    # viewport, so scale by half the window extent and translate by the
    # same amount to move the origin to the upper-left corner.  The y
    # axis is negated because display rows grow downward.
    width, height = tuple(scene.get_size())
    return np.array([[width / 2.0, 0., 0., width / 2.0],
                     [0., -height / 2.0, 0., height / 2.0],
                     [0., 0., 1., 0.],
                     [0., 0., 0., 1.]])
def _close_all():
    """Close every open Mayavi figure."""
    from mayavi import mlab  # deferred import: mayavi is an optional dependency
    mlab.close(all=True)
def _set_3d_view(figure, azimuth, elevation, focalpoint, distance, roll=None,
                 reset_camera=True):
    """Set the camera view (azimuth/elevation/distance/roll) of a Mayavi figure.

    NOTE(review): ``reset_camera`` is accepted but never used here —
    presumably kept for API parity with another backend; confirm.
    """
    from mayavi import mlab
    # traits emits spurious warnings and writes to stdout during view changes;
    # silence both so callers are not spammed.
    with warnings.catch_warnings(record=True):  # traits
        with SilenceStdout():
            mlab.view(azimuth, elevation, distance,
                      focalpoint=focalpoint, figure=figure, roll=roll)
            mlab.draw(figure)
def _set_3d_title(figure, title, size=40):
    """Set the title text of a Mayavi figure.

    Parameters
    ----------
    figure : mayavi.core.scene.Scene
        The figure whose title is set.
    title : str
        The title text to display.
    size : int
        Font size of the title text.
    """
    from mayavi import mlab
    # Bug fix: the previous code passed ``text=''`` so the ``title``
    # argument was silently ignored and no title ever appeared.
    text = mlab.title(text=title, figure=figure)
    text.property.vertical_justification = 'top'
    text.property.font_size = size
    mlab.draw(figure)
def _check_3d_figure(figure):
try:
import mayavi # noqa F401
except Exception:
raise TypeError('figure must be a mayavi scene but the'
'mayavi package is not found.')
else:
from mayavi.core.scene import Scene
if not isinstance(figure, Scene):
raise TypeError('figure must be a mayavi scene.')
def _save_figure(img, filename):
    """Write an image array to ``filename`` via a frameless mpl figure."""
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    from matplotlib.figure import Figure
    out_fig = Figure(frameon=False)
    # Attaching the Agg canvas enables off-screen rendering of the figure.
    FigureCanvasAgg(out_fig)
    # resize=True makes the figure match the image's pixel dimensions.
    out_fig.figimage(img, resize=True)
    out_fig.savefig(filename)
def _close_3d_figure(figure):
    """Close the given Mayavi figure."""
    from mayavi import mlab  # deferred import: mayavi is an optional dependency
    mlab.close(figure)
def _take_3d_screenshot(figure, mode='rgb', filename=None):
    """Capture the current Mayavi figure as an image array.

    Returns a uint8 image of shape ``(h, w, 3)`` for ``mode='rgb'`` or
    ``(h, w, 4)`` otherwise; optionally also saves it to ``filename``.
    In test mode a zero-filled array of the figure's size is returned
    instead of a real screenshot.
    """
    from mayavi import mlab
    from mne.viz.backends.renderer import MNE_3D_BACKEND_TESTING
    if MNE_3D_BACKEND_TESTING:
        # No rendering in the test backend: fabricate a blank image with
        # the correct dimensions and channel count.
        ndim = 3 if mode == 'rgb' else 4
        if figure.scene is None:
            figure_size = figure._window_size
        else:
            figure_size = figure.scene._renwin.size
        img = np.zeros(tuple(figure_size) + (ndim,), np.uint8)
    else:
        # Flush pending GUI events so the rendered frame is up to date
        # before grabbing pixels.
        from pyface.api import GUI
        gui = GUI()
        gui.process_events()
        with warnings.catch_warnings(record=True):  # traits
            img = mlab.screenshot(figure, mode=mode)
    if isinstance(filename, str):
        _save_figure(img, filename)
    return img
@contextmanager
def _testing_context(interactive):
    """Temporarily switch mlab to the 'test' (non-rendering) backend.

    NOTE(review): ``interactive`` is accepted but unused in this
    backend — presumably kept for signature parity; confirm.
    """
    mlab = _import_mlab()
    orig_backend = mlab.options.backend
    mlab.options.backend = 'test'
    try:
        yield
    finally:
        # Always restore the caller's backend, even if the body raised.
        mlab.options.backend = orig_backend
def _oct_glyph(glyph_source, transform):
    """Set an (optionally transformed) octahedron as a Mayavi glyph source.

    Parameters
    ----------
    glyph_source : mayavi glyph-source component
        The component whose ``glyph_source`` attribute is replaced.
    transform : ndarray, shape (4, 4) | None
        Homogeneous transform applied to the octahedron, or None for
        the untransformed solid.
    """
    from tvtk.api import tvtk
    from tvtk.common import configure_input
    from traits.api import Array
    gs = tvtk.PlatonicSolidSource()
    # Workaround for:
    #   File "mayavi/components/glyph_source.py", line 231, in _glyph_position_changed # noqa: E501
    #     g.center = 0.0, 0.0, 0.0
    #   traits.trait_errors.TraitError: Cannot set the undefined 'center' attribute of a 'TransformPolyDataFilter' object. # noqa: E501
    # Adding a 'center' trait makes the assignment a harmless no-op.
    class SafeTransformPolyDataFilter(tvtk.TransformPolyDataFilter):
        center = Array(shape=(3,), value=np.zeros(3))
    gs.solid_type = 'octahedron'
    if transform is not None:
        # glyph: mayavi.modules.vectors.Vectors
        # glyph.glyph: vtkGlyph3D
        # glyph.glyph.glyph: mayavi.components.glyph.Glyph
        assert transform.shape == (4, 4)
        tr = tvtk.Transform()
        # tvtk expects the 16 matrix entries as a flat sequence.
        tr.set_matrix(transform.ravel())
        trp = SafeTransformPolyDataFilter()
        configure_input(trp, gs)
        trp.transform = tr
        trp.update()
        gs = trp
    glyph_source.glyph_source = gs
|
olafhauk/mne-python
|
mne/viz/backends/_pysurfer_mayavi.py
|
Python
|
bsd-3-clause
| 22,459
|
[
"Mayavi",
"VTK"
] |
1d146fd066c4460663e4fe3eef6eac957731d453d7f6bd81ced16ed386af71b0
|
########################################################################
# $HeadURL $
# File: CleanReqDBAgent.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/05/17 08:31:26
########################################################################
""" :mod: CleanReqDBAgent
=====================
.. module: CleanReqDBAgent
:synopsis: cleaning RequestDB from obsolete records and kicking assigned requests
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
cleaning ReqDB from obsolete records and kicking assigned requests
"""
__RCSID__ = "$Id: $"
# #
# @file CleanReqDBAgent.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/05/17 08:32:08
# @brief Definition of CleanReqDBAgent class.
# # imports
import datetime
# # from DIRAC
from DIRAC import S_OK, gMonitor
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Request import Request
AGENT_NAME = "RequestManagement/CleanReqDBAgent"
########################################################################
class CleanReqDBAgent( AgentModule ):
  """
  .. class:: CleanReqDBAgent

  Periodic agent that (1) kicks back 'Assigned' requests that have been
  stuck longer than the kick grace period and (2) deletes finished
  requests older than the delete grace period from the RequestDB.
  """
  # # DEL GRACE PERIOD in DAYS
  DEL_GRACE_DAYS = 60
  # # DEL LIMIT
  DEL_LIMIT = 100
  # # KICK PERIOD in HOURS
  KICK_GRACE_HOURS = 1
  # # KICK LIMIT
  KICK_LIMIT = 10000
  # # remove failed requests flag
  DEL_FAILED = False
  # # request client
  __requestClient = None

  def requestClient( self ):
    """ request client getter (lazily instantiated, cached per agent) """
    if not self.__requestClient:
      self.__requestClient = ReqClient()
    return self.__requestClient

  def initialize( self ):
    """ initialization

    Reads agent options (grace periods, per-cycle limits, delete-failed
    flag) and registers gMonitor activities.
    """
    self.DEL_GRACE_DAYS = self.am_getOption( "DeleteGraceDays", self.DEL_GRACE_DAYS )
    self.log.info( "Delete grace period = %s days" % self.DEL_GRACE_DAYS )
    self.DEL_LIMIT = self.am_getOption( "DeleteLimit", self.DEL_LIMIT )
    self.log.info( "Delete limit = %s request/cycle" % self.DEL_LIMIT )
    self.DEL_FAILED = self.am_getOption( "DeleteFailed", self.DEL_FAILED )
    self.log.info( "Delete failed requests: %s" % { True: "yes", False: "no"}[self.DEL_FAILED] )
    self.KICK_GRACE_HOURS = self.am_getOption( "KickGraceHours", self.KICK_GRACE_HOURS )
    self.log.info( "Kick assigned requests period = %s hours" % self.KICK_GRACE_HOURS )
    self.KICK_LIMIT = self.am_getOption( "KickLimit", self.KICK_LIMIT )
    self.log.info( "Kick limit = %s request/cycle" % self.KICK_LIMIT )
    # # gMonitor stuff
    gMonitor.registerActivity( "DeletedRequests", "Deleted finished requests",
                               "CleanReqDBAgent", "Requests/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "KickedRequests", "Assigned requests kicked",
                               "CleanReqDBAgent", "Requests/min", gMonitor.OP_SUM )
    return S_OK()

  def execute( self ):
    """ execution in one cycle

    First kicks stale 'Assigned' requests (peek + put resets ownership),
    then deletes finished requests older than the delete grace period.
    """
    now = datetime.datetime.utcnow()
    kickTime = now - datetime.timedelta( hours = self.KICK_GRACE_HOURS )
    rmTime = now - datetime.timedelta( days = self.DEL_GRACE_DAYS )
    # # kick
    statusList = [ "Assigned" ]
    requestNamesList = self.requestClient().getRequestNamesList( statusList, self.KICK_LIMIT )
    if not requestNamesList["OK"]:
      self.log.error( "execute: %s" % requestNamesList["Message"] )
      return requestNamesList
    requestNamesList = requestNamesList["Value"]
    kicked = 0
    for requestName, status, lastUpdate in requestNamesList:
      # re-query the status: it may have changed since the list was fetched
      reqStatus = self.requestClient().getRequestStatus( requestName )
      if not reqStatus['OK']:
        self.log.error( ( "execute: unable to get request status", reqStatus['Message'] ) )
        continue
      status = reqStatus['Value']
      if lastUpdate < kickTime and status == 'Assigned':
        getRequest = self.requestClient().peekRequest( requestName )
        if not getRequest["OK"]:
          self.log.error( "execute: unable to read request '%s': %s" % ( requestName, getRequest["Message"] ) )
          continue
        getRequest = getRequest["Value"]
        if getRequest and getRequest.LastUpdate < kickTime:
          self.log.info( "execute: kick assigned request '%s' in status %s" % ( requestName, getRequest.Status ) )
          # putting the peeked request back releases it for other executors
          putRequest = self.requestClient().putRequest( getRequest )
          if not putRequest["OK"]:
            self.log.error( "execute: unable to put request '%s': %s" % ( requestName, putRequest["Message"] ) )
            continue
          else:
            # NOTE(review): 'Value' is formatted with %d here, so it is
            # presumably the numeric request ID — confirm against
            # ReqClient.putRequest's return value.
            self.log.verbose( "Kicked request %d" % putRequest['Value'] )
            kicked += 1
    # # delete
    statusList = [ "Done", "Failed", "Canceled" ] if self.DEL_FAILED else [ "Done" ]
    requestNamesList = self.requestClient().getRequestNamesList( statusList, self.DEL_LIMIT )
    if not requestNamesList["OK"]:
      self.log.error( "execute: %s" % requestNamesList["Message"] )
      return requestNamesList
    requestNamesList = requestNamesList["Value"]
    deleted = 0
    for requestName, status, lastUpdate in requestNamesList:
      if lastUpdate < rmTime:
        self.log.info( "execute: deleting request '%s' with status %s" % ( requestName, status ) )
        delRequest = self.requestClient().deleteRequest( requestName )
        if not delRequest["OK"]:
          self.log.error( "execute: unable to delete request '%s': %s" % ( requestName, delRequest["Message"] ) )
          continue
        deleted += 1
    gMonitor.addMark( "KickedRequests", kicked )
    gMonitor.addMark( "DeletedRequests", deleted )
    self.log.info( "execute: kicked assigned requests = %s" % kicked )
    self.log.info( "execute: deleted finished requests = %s" % deleted )
    return S_OK()
|
calancha/DIRAC
|
RequestManagementSystem/Agent/CleanReqDBAgent.py
|
Python
|
gpl-3.0
| 5,696
|
[
"DIRAC"
] |
eece14a918044950a980e15ff56c9d43f39fdf0f504dfbbae141f361ea042504
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import os
from pymatgen.io.feff.sets import FeffInputSet
from pymatgen.io.feff import FeffPot
from pymatgen.io.cif import CifParser
# Locate the bundled CIF test structure (hexagonal CoO) relative to this file.
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                        'test_files')
cif_file = 'CoO19128.cif'
central_atom = 'O'
cif_path = os.path.join(test_dir, cif_file)
# Module-level fixtures shared by every test case below: the parsed
# structure and a FeffInputSet configured with MaterialsProject defaults.
r = CifParser(cif_path)
structure = r.get_structures()[0]
x = FeffInputSet("MaterialsProject")
class FeffInputSetTest(unittest.TestCase):
    """Tests for FeffInputSet generation of FEFF input sections
    (HEADER, PARAMETERS, POTENTIALS, ATOMS) from a CIF-derived structure.
    """

    # Reference HEADER block expected for the CoO structure above.
    header_string = """* This FEFF.inp file generated by pymatgen
TITLE comment: From cif file
TITLE Source: CoO19128.cif
TITLE Structure Summary: Co2 O2
TITLE Reduced formula: CoO
TITLE space group: (P6_3mc), space number: (186)
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.000000 90.000000 120.000000
TITLE sites: 4
* 1 Co 0.666667 0.333333 0.003676
* 2 Co 0.333334 0.666666 0.503676
* 3 O 0.333334 0.666666 0.121324
* 4 O 0.666667 0.333333 0.621325"""
    def test_get_header(self):
        """HEADER matches the reference, allowing site-line reordering."""
        comment = 'From cif file'
        header = str(FeffInputSet.get_header(x, structure, 'CoO19128.cif',
                                             comment))
        ref = FeffInputSetTest.header_string.splitlines()
        # The last four (site) lines may appear in any order, so compare
        # them by content (columns after the index) rather than position.
        last4 = [" ".join(l.split()[2:]) for l in ref[-4:]]
        for i, l in enumerate(header.splitlines()):
            if i < 9:
                self.assertEqual(l, ref[i])
            else:
                s = " ".join(l.split()[2:])
                self.assertIn(s, last4)
    def test_getfefftags(self):
        """XANES tag set contains the expected COREHOLE value."""
        tags = FeffInputSet.get_feff_tags(x, "XANES").as_dict()
        self.assertEqual(tags['COREHOLE'], "FSR",
                         "Failed to generate PARAMETERS string")
    def test_get_feffPot(self):
        """POTENTIALS section round-trips through FeffPot parsing."""
        POT = str(FeffInputSet.get_feff_pot(x, structure, central_atom))
        d, dr = FeffPot.pot_dict_from_string(POT)
        self.assertEqual(d['Co'], 1, "Wrong symbols read in for FeffPot")
    def test_get_feff_atoms(self):
        """ATOMS section lists the absorbing atom first."""
        ATOMS = str(FeffInputSet.get_feff_atoms(x, structure, central_atom))
        self.assertEqual(ATOMS.splitlines()[3].split()[4], central_atom,
                         "failed to create ATOMS string")
    def test_to_and_from_dict(self):
        """Serialization to dict and back preserves the feff.inp content."""
        d = x.as_dict(structure, 'XANES', 'cif', 'O', 'test')
        f = d['feff.inp']
        f2 = x.from_dict(d)
        self.assertEqual(f, f2, "FeffinputSet to and from dict do not match")
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
sonium0/pymatgen
|
pymatgen/io/tests/test_feffio_set.py
|
Python
|
mit
| 2,663
|
[
"FEFF",
"pymatgen"
] |
0fb4b9a4287ed6a4d28ecb992b715108dcd5a83eecdcab724d06046f6e07b9d0
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import reduce
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import mcscf
from pyscf import cc
from pyscf import tdscf
from pyscf import dft
from pyscf import df
from pyscf import solvent
from pyscf.scf import cphf
from pyscf.grad import rhf as rhf_grad
from pyscf.grad import rks as rks_grad
from pyscf.solvent import ddcosmo
from pyscf.solvent import ddcosmo_grad
from pyscf.solvent import _ddcosmo_tdscf_grad
from pyscf.symm import sph
def tda_grad(td, z):
    '''ddcosmo TDA gradients

    Reference (finite-difference-style) implementation of nuclear
    gradients for a TDA excited state with ddCOSMO solvation.

    td : solvated TDA/TDDFT object (provides ._scf and .with_solvent)
    z  : TDA amplitude; z[0] is reshaped to (nocc, nvir)

    Returns a (natm, 3) array of gradient contributions.
    '''
    mol = td.mol
    mf = td._scf
    mo_coeff = mf.mo_coeff
    mo_energy = mf.mo_energy
    mo_occ = mf.mo_occ
    nao, nmo = mo_coeff.shape
    nocc = (mo_occ>0).sum()
    nvir = nmo - nocc
    # sqrt(2) restores the normalization of the singlet amplitude
    z = z[0].reshape(nocc,nvir).T * numpy.sqrt(2)
    orbv = mo_coeff[:,nocc:]
    orbo = mo_coeff[:,:nocc]
    # ddCOSMO machinery: atomic radii, switching function, one-sphere
    # Lebedev grid, real spherical harmonics and the ddCOSMO L matrix.
    r_vdw = ddcosmo.get_atomic_radii(td.with_solvent)
    fi = ddcosmo.make_fi(td.with_solvent, r_vdw)
    ui = 1 - fi
    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(td.with_solvent.lebedev_order)
    ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, td.with_solvent.lmax, True))
    grids = td.with_solvent.grids
    cached_pol = ddcosmo.cache_fake_multipoles(grids, r_vdw, td.with_solvent.lmax)
    L = ddcosmo.make_L(td.with_solvent, r_vdw, ylm_1sph, fi)
    def fvind(x):
        # orbital-Hessian contraction used by the CPHF solver; ``g`` is
        # the solvent-augmented MO-basis two-electron tensor built below
        v_mo  = numpy.einsum('iabj,xai->xbj', g[:nocc,nocc:,nocc:,:nocc], x)
        v_mo += numpy.einsum('aibj,xai->xbj', g[nocc:,:nocc,nocc:,:nocc], x)
        return v_mo
    h1 = rhf_grad.get_hcore(mol)
    s1 = rhf_grad.get_ovlp(mol)
    eri1 = -mol.intor('int2e_ip1', aosym='s1', comp=3)
    eri1 = eri1.reshape(3,nao,nao,nao,nao)
    eri0 = ao2mo.kernel(mol, mo_coeff)
    eri0 = ao2mo.restore(1, eri0, nmo).reshape(nmo,nmo,nmo,nmo)
    g = eri0 * 2 - eri0.transpose(0,3,2,1)
    zeta = lib.direct_sum('i+j->ij', mo_energy, mo_energy) * .5
    zeta[nocc:,:nocc] = mo_energy[:nocc]
    zeta[:nocc,nocc:] = mo_energy[nocc:]
    dielectric = td.with_solvent.eps
    if dielectric > 0:
        f_epsilon = (dielectric-1.)/dielectric
    else:
        f_epsilon = 1
    # solvent contributions: nuclear 1st-order term and B matrices
    pcm_nuc = .5 * f_epsilon * nuc_part1(td.with_solvent, r_vdw, ui, ylm_1sph, cached_pol, L)
    B0 = .5 * f_epsilon * make_B(td.with_solvent, r_vdw, ui, ylm_1sph, cached_pol, L)
    B0 = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', B0, mo_coeff, mo_coeff, mo_coeff, mo_coeff)
    g += B0 * 2
    B1 = .5 * f_epsilon * make_B1(td.with_solvent, r_vdw, ui, ylm_1sph, cached_pol, L)
    offsetdic = mol.offset_nr_by_atom()
    de = numpy.zeros((mol.natm,3))
    # per-atom gradient assembly: derivative Fock pieces + CPHF response
    for ia in range(mol.natm):
        shl0, shl1, p0, p1 = offsetdic[ia]
        mol.set_rinv_origin(mol.atom_coord(ia))
        h1ao = -mol.atom_charge(ia) * mol.intor('int1e_iprinv', comp=3)
        h1ao[:,p0:p1] += h1[:,p0:p1]
        h1ao = h1ao + h1ao.transpose(0,2,1)
        h1ao += pcm_nuc[ia]
        h1mo = numpy.einsum('pi,xpq,qj->xij', mo_coeff, h1ao, mo_coeff)
        s1mo = numpy.einsum('pi,xpq,qj->xij', mo_coeff[p0:p1], s1[:,p0:p1], mo_coeff)
        s1mo = s1mo + s1mo.transpose(0,2,1)
        f1 = h1mo - numpy.einsum('xpq,pq->xpq', s1mo, zeta)
        f1-= numpy.einsum('klpq,xlk->xpq', g[:nocc,:nocc], s1mo[:,:nocc,:nocc])
        eri1a = eri1.copy()
        eri1a[:,:p0] = 0
        eri1a[:,p1:] = 0
        eri1a = eri1a + eri1a.transpose(0,2,1,3,4)
        eri1a = eri1a + eri1a.transpose(0,3,4,1,2)
        g1 = lib.einsum('xpqrs,pi,qj,rk,sl->xijkl', eri1a, mo_coeff, mo_coeff, mo_coeff, mo_coeff)
        tmp1 = lib.einsum('xpqrs,pi,qj,rk,sl->xijkl', B1[ia], mo_coeff, mo_coeff, mo_coeff, mo_coeff)
        g1 = g1 * 2 - g1.transpose(0,1,4,3,2)
        g1 += tmp1 * 2
        f1 += numpy.einsum('xkkpq->xpq', g1[:,:nocc,:nocc])
        f1ai = f1[:,nocc:,:nocc].copy()
        c1 = s1mo * -.5
        c1vo = cphf.solve(fvind, mo_energy, mo_occ, f1ai, max_cycle=50)[0]
        c1[:,nocc:,:nocc] = c1vo
        c1[:,:nocc,nocc:] = -(s1mo[:,nocc:,:nocc]+c1vo).transpose(0,2,1)
        f1 += numpy.einsum('kapq,xak->xpq', g[:nocc,nocc:], c1vo)
        f1 += numpy.einsum('akpq,xak->xpq', g[nocc:,:nocc], c1vo)
        e1 = numpy.einsum('xaijb,ai,bj->x', g1[:,nocc:,:nocc,:nocc,nocc:], z, z)
        e1 += numpy.einsum('xab,ai,bi->x', f1[:,nocc:,nocc:], z, z)
        e1 -= numpy.einsum('xij,ai,aj->x', f1[:,:nocc,:nocc], z, z)
        g1 = numpy.einsum('pjkl,xpi->xijkl', g, c1)
        g1 += numpy.einsum('ipkl,xpj->xijkl', g, c1)
        g1 += numpy.einsum('ijpl,xpk->xijkl', g, c1)
        g1 += numpy.einsum('ijkp,xpl->xijkl', g, c1)
        e1 += numpy.einsum('xaijb,ai,bj->x', g1[:,nocc:,:nocc,:nocc,nocc:], z, z)
        de[ia] = e1
    return de
def nuc_part(pcmobj, r_vdw, ui, ylm_1sph, cached_pol, L):
    '''0th order

    Zeroth-order nuclear contribution to the ddCOSMO potential matrix.
    Returns an (nao, nao) AO-basis matrix.
    '''
    mol = pcmobj.mol
    natm = mol.natm
    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
    ngrid_1sph = coords_1sph.shape[0]
    lmax = pcmobj.lmax
    nlm = (lmax+1)**2
    nao = mol.nao
    atom_coords = mol.atom_coords()
    atom_charges = mol.atom_charges()
    grids = pcmobj.grids
    extern_point_idx = ui > 0
    # cavity surface points: one Lebedev sphere per atom, scaled by r_vdw
    cav_coords = (atom_coords.reshape(natm,1,3)
                  + numpy.einsum('r,gx->rgx', r_vdw, coords_1sph))
    v_phi = numpy.zeros((natm, ngrid_1sph))
    for ia in range(natm):
# Note (-) sign is not applied to atom_charges, because (-) is explicitly
# included in rhs and L matrix
        d_rs = atom_coords.reshape(-1,1,3) - cav_coords[ia]
        v_phi[ia] = numpy.einsum('z,zp->p', atom_charges, 1./lib.norm(d_rs,axis=2))
    phi = -numpy.einsum('n,xn,jn,jn->jx', weights_1sph, ylm_1sph, ui, v_phi)
    # solve LX = g for the ddCOSMO X coefficients
    Xvec = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi.ravel())
    Xvec = Xvec.reshape(natm,nlm)
    i1 = 0
    scaled_weights = numpy.empty((grids.weights.size))
    for ia in range(natm):
        fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
        i0, i1 = i1, i1 + fak_pol[0].shape[1]
        eta_nj = 0
        p1 = 0
        for l in range(lmax+1):
            fac = 4*numpy.pi/(l*2+1)
            p0, p1 = p1, p1 + (l*2+1)
            eta_nj += fac * numpy.einsum('mn,m->n', fak_pol[l], Xvec[ia,p0:p1])
        scaled_weights[i0:i1] = eta_nj
    scaled_weights *= grids.weights
    ao = mol.eval_gto('GTOval', grids.coords)
    vmat = -lib.einsum('g,gi,gj->ij', scaled_weights, ao, ao)
    # Contribution of nuclear charges to the total density
    # The factor numpy.sqrt(4*numpy.pi) is due to the product of 4*pi * Y_0^0
    psi = numpy.zeros((natm, nlm))
    for ia in range(natm):
        psi[ia,0] += numpy.sqrt(4*numpy.pi)/r_vdw[ia] * mol.atom_charge(ia)
    # <Psi, L^{-1}g> -> Psi = SL the adjoint equation to LX = g
    L_S = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi.ravel())
    L_S = L_S.reshape(natm,nlm)
    xi_jn = numpy.einsum('n,jn,xn,jx->jn', weights_1sph, ui, ylm_1sph, L_S)
    # keep only cavity points outside all other atoms' switching regions
    cav_coords = cav_coords[extern_point_idx]
    xi_jn = xi_jn[extern_point_idx]
    max_memory = pcmobj.max_memory - lib.current_memory()[0]
    blksize = int(max(max_memory*.9e6/8/nao**2, 400))
    cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, 'int3c2e')
    # 3-center integrals of the AO pair density with the cavity charges
    fakemol = gto.fakemol_for_charges(cav_coords)
    v_nj = df.incore.aux_e2(mol, fakemol, intor='int3c2e', aosym='s1', cintopt=cintopt)
    vmat += numpy.einsum('ijn,n->ij', v_nj, xi_jn)
    return vmat
def nuc_part1(pcmobj, r_vdw, ui, ylm_1sph, cached_pol, L):
    '''1st order

    First-order (nuclear-derivative) ddCOSMO potential.  Returns an
    array of shape (natm, 3, nao, nao): the derivative of the nuclear
    solvent potential with respect to each atomic coordinate.
    '''
    mol = pcmobj.mol
    natm = mol.natm
    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
    ngrid_1sph = coords_1sph.shape[0]
    lmax = pcmobj.lmax
    nlm = (lmax+1)**2
    nao = mol.nao
    atom_coords = mol.atom_coords()
    atom_charges = mol.atom_charges()
    grids = pcmobj.grids
    aoslices = mol.aoslice_by_atom()
    extern_point_idx = ui > 0
    # derivatives of the switching function fi and of ui = 1 - fi
    fi0 = ddcosmo.make_fi(pcmobj, r_vdw)
    fi1 = ddcosmo_grad.make_fi1(pcmobj, pcmobj.get_atomic_radii())
    fi1[:,:,ui==0] = 0
    ui1 = -fi1
    vmat1 = numpy.zeros((natm,3,nao,nao))
    cav_coords = (atom_coords.reshape(natm,1,3)
                  + numpy.einsum('r,gx->rgx', r_vdw, coords_1sph))
    v_phi = numpy.zeros((natm, ngrid_1sph))
    for ia in range(natm):
# Note (-) sign is not applied to atom_charges, because (-) is explicitly
# included in rhs and L matrix
        d_rs = atom_coords.reshape(-1,1,3) - cav_coords[ia]
        v_phi[ia] = numpy.einsum('z,zp->p', atom_charges, 1./lib.norm(d_rs,axis=2))
    phi0 = -numpy.einsum('n,xn,jn,jn->jx', weights_1sph, ylm_1sph, ui, v_phi)
    Xvec0 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi0.ravel())
    Xvec0 = Xvec0.reshape(natm,nlm)
    ngrid_1sph = weights_1sph.size
    v_phi0 = numpy.empty((natm,ngrid_1sph))
    for ia in range(natm):
        cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
        d_rs = atom_coords.reshape(-1,1,3) - cav_coords
        v_phi0[ia] = numpy.einsum('z,zp->p', atom_charges, 1./lib.norm(d_rs,axis=2))
    # derivative of phi through ui1 ...
    phi1 = -numpy.einsum('n,ln,azjn,jn->azjl', weights_1sph, ylm_1sph, ui1, v_phi0)
    # ... plus the explicit derivative of the nuclear potential at the
    # cavity points (both as source and as moving grid)
    for ia in range(natm):
        cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
        for ja in range(natm):
            rs = atom_coords[ja] - cav_coords
            d_rs = lib.norm(rs, axis=1)
            v_phi = atom_charges[ja] * numpy.einsum('px,p->px', rs, 1./d_rs**3)
            tmp = numpy.einsum('n,ln,n,nx->xl', weights_1sph, ylm_1sph, ui[ia], v_phi)
            phi1[ja,:,ia] += tmp  # response of the other atoms
            phi1[ia,:,ia] -= tmp  # response of cavity grids
    L1 = ddcosmo_grad.make_L1(pcmobj, r_vdw, ylm_1sph, fi0)
    phi1 -= lib.einsum('aziljm,jm->azil', L1, Xvec0)
    Xvec1 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi1.reshape(-1,natm*nlm).T)
    Xvec1 = Xvec1.T.reshape(natm,3,natm,nlm)
    i1 = 0
    # numerical-grid contribution, including grid-response weights
    for ia, (coords, weight, weight1) in enumerate(rks_grad.grids_response_cc(grids)):
        i0, i1 = i1, i1 + weight.size
        ao = mol.eval_gto('GTOval_sph_deriv1', coords)
        aow = numpy.einsum('gi,g->gi', ao[0], weight)
        aopair1 = lib.einsum('xgi,gj->xgij', ao[1:], aow)
        aow = numpy.einsum('gi,zxg->zxgi', ao[0], weight1)
        aopair0 = lib.einsum('zxgi,gj->zxgij', aow, ao[0])
        fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
        fac_pol = ddcosmo._vstack_factor_fak_pol(fak_pol, lmax)
        vmat1 -= numpy.einsum('m,mn,zxnij->zxij', Xvec0[ia], fac_pol, aopair0)
        vtmp = numpy.einsum('m,mn,xnij->xij', Xvec0[ia],fac_pol, aopair1)
        vmat1[ia,:] -= vtmp
        vmat1[ia,:] -= vtmp.transpose(0,2,1)
        # translate AO-derivative contributions onto the atom owning the AO
        for ja in range(natm):
            shl0, shl1, p0, p1 = aoslices[ja]
            vmat1[ja,:,p0:p1,:] += vtmp[:,p0:p1]
            vmat1[ja,:,:,p0:p1] += vtmp[:,p0:p1].transpose(0,2,1)
        scaled_weights = lib.einsum('azm,mn->azn', Xvec1[:,:,ia], fac_pol)
        scaled_weights *= weight
        aow = numpy.einsum('gi,azg->azgi', ao[0], scaled_weights)
        vmat1 -= numpy.einsum('gi,azgj->azij', ao[0], aow)
    psi0 = numpy.zeros((natm, nlm))
    for ia in range(natm):
        psi0[ia,0] += numpy.sqrt(4*numpy.pi)/r_vdw[ia] * mol.atom_charge(ia)
    # adjoint solution LS and its first derivative through L1
    LS0 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi0.ravel())
    LS0 = LS0.reshape(natm,nlm)
    LS1 = numpy.einsum('il,aziljm->azjm', LS0, L1)
    LS1 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, LS1.reshape(-1,natm*nlm).T)
    LS1 = LS1.T.reshape(natm,3,natm,nlm)
    int3c2e = mol._add_suffix('int3c2e')
    int3c2e_ip1 = mol._add_suffix('int3c2e_ip1')
    cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, int3c2e)
    for ia in range(natm):
        cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
        #fakemol = gto.fakemol_for_charges(cav_coords[ui[ia]>0])
        fakemol = gto.fakemol_for_charges(cav_coords)
        wtmp = lib.einsum('l,n,ln->ln', LS0[ia], weights_1sph, ylm_1sph)
        v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1', cintopt=cintopt)
        vmat1 -= numpy.einsum('azl,n,ln,n,pqn->azpq', LS1[:,:,ia], weights_1sph, ylm_1sph, ui[ia], v_nj)
        vmat1 += lib.einsum('ln,azn,ijn->azij', wtmp, ui1[:,:,ia], v_nj)
        v_e1_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e_ip1, comp=3, aosym='s1')
        vtmp = lib.einsum('ln,n,xijn->xij', wtmp, ui[ia], v_e1_nj)
        vmat1[ia] += vtmp
        vmat1[ia] += vtmp.transpose(0,2,1)
        for ja in range(natm):
            shl0, shl1, p0, p1 = aoslices[ja]
            vmat1[ja,:,p0:p1,:] -= vtmp[:,p0:p1]
            vmat1[ja,:,:,p0:p1] -= vtmp[:,p0:p1].transpose(0,2,1)
    return vmat1
def make_B(pcmobj, r_vdw, ui, ylm_1sph, cached_pol, L):
    '''0th order

    Zeroth-order ddCOSMO B tensor in the AO basis; returns an array of
    shape (nao, nao, nao, nao), symmetrized over the two AO pairs.
    '''
    mol = pcmobj.mol
    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
    ngrid_1sph = coords_1sph.shape[0]
    mol = pcmobj.mol
    natm = mol.natm
    nao  = mol.nao
    lmax = pcmobj.lmax
    nlm = (lmax+1)**2
    atom_coords = mol.atom_coords()
    atom_charges = mol.atom_charges()
    grids = pcmobj.grids
    extern_point_idx = ui > 0
    # cavity surface points outside the switching regions
    cav_coords = (atom_coords.reshape(natm,1,3)
                  + numpy.einsum('r,gx->rgx', r_vdw, coords_1sph))
    cav_coords = cav_coords[extern_point_idx]
    int3c2e = mol._add_suffix('int3c2e')
    cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, int3c2e)
    fakemol = gto.fakemol_for_charges(cav_coords)
    v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1', cintopt=cintopt)
    nao_pair = v_nj.shape[0]
    # scatter integrals back to the full (atom, grid) layout
    v_phi = numpy.zeros((natm, ngrid_1sph, nao, nao))
    v_phi[extern_point_idx] += v_nj.transpose(2,0,1)
    phi = numpy.einsum('n,xn,jn,jnpq->jxpq', weights_1sph, ylm_1sph, ui, v_phi)
    # one linear solve per AO pair
    Xvec = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi.reshape(natm*nlm,-1))
    Xvec = Xvec.reshape(natm,nlm,nao,nao)
    ao = mol.eval_gto('GTOval', grids.coords)
    aow = numpy.einsum('gi,g->gi', ao, grids.weights)
    aopair = numpy.einsum('gi,gj->gij', ao, aow)
    psi = numpy.zeros((natm, nlm, nao, nao))
    i1 = 0
    for ia in range(natm):
        fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
        i0, i1 = i1, i1 + fak_pol[0].shape[1]
        p1 = 0
        for l in range(lmax+1):
            fac = 4*numpy.pi/(l*2+1)
            p0, p1 = p1, p1 + (l*2+1)
            psi[ia,p0:p1] = -fac * numpy.einsum('mn,nij->mij', fak_pol[l], aopair[i0:i1])
    B = lib.einsum('nlpq,nlrs->pqrs', psi, Xvec)
    # symmetrize over the two AO pairs
    B = B + B.transpose(2,3,0,1)
    return B
def make_B1(pcmobj, r_vdw, ui, ylm_1sph, cached_pol, L):
    '''1st order

    First-order (nuclear-derivative) ddCOSMO B tensor; returns an array
    of shape (natm, 3, nao, nao, nao, nao).
    '''
    mol = pcmobj.mol
    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
    ngrid_1sph = coords_1sph.shape[0]
    mol = pcmobj.mol
    natm = mol.natm
    nao  = mol.nao
    lmax = pcmobj.lmax
    nlm = (lmax+1)**2
    atom_coords = mol.atom_coords()
    atom_charges = mol.atom_charges()
    grids = pcmobj.grids
    extern_point_idx = ui > 0
    # --- zeroth-order quantities (phi0, Xvec0, psi0) used by the
    #     derivative assembly below ---
    cav_coords = (atom_coords.reshape(natm,1,3)
                  + numpy.einsum('r,gx->rgx', r_vdw, coords_1sph))
    cav_coords = cav_coords[extern_point_idx]
    int3c2e = mol._add_suffix('int3c2e')
    cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas,
                                         mol._env, int3c2e)
    fakemol = gto.fakemol_for_charges(cav_coords)
    v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1', cintopt=cintopt)
    nao_pair = v_nj.shape[0]
    v_phi = numpy.zeros((natm, ngrid_1sph, nao, nao))
    v_phi[extern_point_idx] += v_nj.transpose(2,0,1)
    phi0 = numpy.einsum('n,xn,jn,jnpq->jxpq', weights_1sph, ylm_1sph, ui, v_phi)
    Xvec0 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi0.reshape(natm*nlm,-1))
    Xvec0 = Xvec0.reshape(natm,nlm,nao,nao)
    ao = mol.eval_gto('GTOval', grids.coords)
    aow = numpy.einsum('gi,g->gi', ao, grids.weights)
    aopair = numpy.einsum('gi,gj->gij', ao, aow)
    psi0 = numpy.zeros((natm, nlm, nao, nao))
    i1 = 0
    for ia in range(natm):
        fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
        i0, i1 = i1, i1 + fak_pol[0].shape[1]
        p1 = 0
        for l in range(lmax+1):
            fac = 4*numpy.pi/(l*2+1)
            p0, p1 = p1, p1 + (l*2+1)
            psi0[ia,p0:p1] = -fac * numpy.einsum('mn,nij->mij', fak_pol[l], aopair[i0:i1])
    # --- first-order pieces: ui1 (switching-function derivative),
    #     phi1 (rhs derivative) and psi1 (grid-response derivative) ---
    fi0 = ddcosmo.make_fi(pcmobj, r_vdw)
    fi1 = ddcosmo_grad.make_fi1(pcmobj, pcmobj.get_atomic_radii())
    fi1[:,:,ui==0] = 0
    ui1 = -fi1
    phi1 = numpy.zeros(ui1.shape[:3] + (nlm,nao,nao))
    int3c2e = mol._add_suffix('int3c2e')
    int3c2e_ip1 = mol._add_suffix('int3c2e_ip1')
    aoslices = mol.aoslice_by_atom()
    for ia in range(natm):
        cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
        #fakemol = gto.fakemol_for_charges(cav_coords[ui[ia]>0])
        fakemol = gto.fakemol_for_charges(cav_coords)
        v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1')
        phi1[:,:,ia] += lib.einsum('n,ln,azn,ijn->azlij', weights_1sph, ylm_1sph, ui1[:,:,ia], v_nj)
        v_e1_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e_ip1, comp=3, aosym='s1')
        v_e2_nj = v_e1_nj + v_e1_nj.transpose(0,2,1,3)
        phi1[ia,:,ia] += lib.einsum('n,ln,n,xijn->xlij', weights_1sph, ylm_1sph, ui[ia], v_e2_nj)
        for ja in range(natm):
            shl0, shl1, p0, p1 = aoslices[ja]
            v = numpy.einsum('n,ln,n,xijn->xlij', weights_1sph, ylm_1sph, ui[ia], v_e1_nj[:,p0:p1])
            phi1[ja,:,ia,:,p0:p1,:] -= v
            phi1[ja,:,ia,:,:,p0:p1] -= v.transpose(0,1,3,2)
    psi1 = numpy.zeros((natm,3,natm,nlm,nao,nao))
    i1 = 0
    for ia, (coords, weight, weight1) in enumerate(rks_grad.grids_response_cc(grids)):
        i0, i1 = i1, i1 + weight.size
        ao = mol.eval_gto('GTOval_sph_deriv1', coords)
        aow = numpy.einsum('gi,g->gi', ao[0], weight)
        aopair1 = lib.einsum('xgi,gj->xgij', ao[1:], aow)
        aow = numpy.einsum('gi,zxg->zxgi', ao[0], weight1)
        aopair0 = lib.einsum('zxgi,gj->zxgij', aow, ao[0])
        fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
        p1 = 0
        for l in range(lmax+1):
            fac = 4*numpy.pi/(l*2+1)
            p0, p1 = p1, p1 + (l*2+1)
            psi1[: ,:,ia,p0:p1] -= fac*numpy.einsum('mn,zxnij->zxmij', fak_pol[l], aopair0)
            vtmp = fac*numpy.einsum('mn,xnij->xmij', fak_pol[l], aopair1)
            psi1[ia,:,ia,p0:p1] -= vtmp
            psi1[ia,:,ia,p0:p1] -= vtmp.transpose(0,1,3,2)
            # translate AO-derivative contributions onto the owning atom
            for ja in range(natm):
                shl0, shl1, q0, q1 = aoslices[ja]
                psi1[ja,:,ia,p0:p1,q0:q1,:] += vtmp[:,:,q0:q1]
                psi1[ja,:,ia,p0:p1,:,q0:q1] += vtmp[:,:,q0:q1].transpose(0,1,3,2)
    L1 = ddcosmo_grad.make_L1(pcmobj, r_vdw, ylm_1sph, fi0)
    Xvec1 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi1.transpose(2,3,0,1,4,5).reshape(natm*nlm,-1))
    Xvec1 = Xvec1.reshape(natm,nlm,natm,3,nao,nao).transpose(2,3,0,1,4,5)
    LS0 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi0.reshape(natm*nlm,-1))
    LS0 = LS0.reshape(natm,nlm,nao,nao)
    # assemble B1 = psi1.X0 + psi0.X1 - LS0.L1.X0, then symmetrize
    B = lib.einsum('ixnlpq,nlrs->ixpqrs', psi1, Xvec0)
    B+= lib.einsum('nlpq,ixnlrs->ixpqrs', psi0, Xvec1)
    B-= lib.einsum('ilpq,aziljm,jmrs->azpqrs', LS0, L1, Xvec0)
    B = B + B.transpose(0,1,4,5,2,3)
    return B
def B1_dot_x(pcmobj, dm, r_vdw, ui, ylm_1sph, cached_pol, L):
    '''Contract the first derivative of the ddCOSMO B matrix with a density
    matrix on the fly, i.e. the equivalent of
    numpy.einsum('azpqrs,pq->azrs', make_B1(...), dm) without ever
    materializing the rank-6 B1 tensor.

    Args:
        pcmobj : DDCOSMO object (supplies mol, grids, lmax, lebedev_order)
        dm : 2D ndarray, the (nao, nao) density matrix to contract with
        r_vdw : van der Waals radii for each atom
        ui : switching-function values on the cavity surface grid
        ylm_1sph : real spherical harmonics evaluated on the unit-sphere grid
        cached_pol : cached fake multipoles, keyed by atom symbol
        L : the ddCOSMO L matrix

    Returns:
        ndarray of shape (natm, 3, nao, nao): derivative contraction Bx.
    '''
    # Note: the original implementation assigned `mol = pcmobj.mol` twice and
    # computed dms/is_single_dm/n_dm/atom_charges without using them; those
    # redundant statements have been removed.  dm is used directly and must
    # already be a 2D ndarray (sliced as dm[p0:p1] below).
    mol = pcmobj.mol
    natm = mol.natm
    nao = mol.nao
    lmax = pcmobj.lmax
    nlm = (lmax+1)**2
    atom_coords = mol.atom_coords()
    aoslices = mol.aoslice_by_atom()
    grids = pcmobj.grids
    coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
    ngrid_1sph = coords_1sph.shape[0]
    extern_point_idx = ui > 0
    # Switching function and its nuclear derivative; ui = 1 - fi so ui1 = -fi1,
    # zeroed on fully buried points where ui == 0.
    fi0 = ddcosmo.make_fi(pcmobj, r_vdw)
    fi1 = ddcosmo_grad.make_fi1(pcmobj, pcmobj.get_atomic_radii())
    fi1[:,:,ui==0] = 0
    ui1 = -fi1
    Bx = numpy.zeros((natm,3,nao,nao))

    # psi0: electron density projected onto the per-atom fake multipoles.
    ao = mol.eval_gto('GTOval', grids.coords)
    aow = numpy.einsum('gi,g->gi', ao, grids.weights)
    aopair = numpy.einsum('gi,gj->gij', ao, aow)
    den = numpy.einsum('gij,ij->g', aopair, dm)
    psi0 = numpy.zeros((natm, nlm))
    i1 = 0
    for ia in range(natm):
        fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
        fac_pol = ddcosmo._vstack_factor_fak_pol(fak_pol, lmax)
        i0, i1 = i1, i1 + fac_pol.shape[1]
        psi0[ia] = -numpy.einsum('mn,n->m', fac_pol, den[i0:i1])
    # LS0 = L^-T psi0 (adjoint solution)
    LS0 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi0.ravel())
    LS0 = LS0.reshape(natm,nlm)

    # phi0/phi1: cavity-surface potential from the electron density and its
    # nuclear derivative, accumulated per cavity sphere ia.
    phi0 = numpy.zeros((natm,nlm))
    phi1 = numpy.zeros((natm,3,natm,nlm))
    int3c2e = mol._add_suffix('int3c2e')
    int3c2e_ip1 = mol._add_suffix('int3c2e_ip1')
    cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, int3c2e)
    cintopt_ip1 = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, int3c2e_ip1)
    for ia in range(natm):
        cav_coords = atom_coords[ia] + r_vdw[ia] * coords_1sph
        #fakemol = gto.fakemol_for_charges(cav_coords[ui[ia]>0])
        fakemol = gto.fakemol_for_charges(cav_coords)
        v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1', cintopt=cintopt)
        v_phi = numpy.einsum('pqg,pq->g', v_nj, dm)
        phi0[ia] = numpy.einsum('n,ln,n,n->l', weights_1sph, ylm_1sph, ui[ia], v_phi)
        # Derivative through ui (cavity exposure changes with geometry)
        phi1[:,:,ia] += lib.einsum('n,ln,azn,n->azl', weights_1sph, ylm_1sph, ui1[:,:,ia], v_phi)
        Bx += lib.einsum('l,n,ln,azn,ijn->azij', LS0[ia], weights_1sph, ylm_1sph, ui1[:,:,ia], v_nj)
        wtmp = lib.einsum('n,ln,n->ln', weights_1sph, ylm_1sph, ui[ia])
        # Derivative of the 3-center integrals w.r.t. the bra AO centers
        v_e1_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e_ip1, comp=3,
                                   aosym='s1', cintopt=cintopt_ip1)
        vtmp = lib.einsum('l,ln,xijn->xij', LS0[ia], wtmp, v_e1_nj)
        Bx[ia] += vtmp
        Bx[ia] += vtmp.transpose(0,2,1)
        for ja in range(natm):
            shl0, shl1, p0, p1 = aoslices[ja]
            # Translational-invariance shift: moving atom ja moves its AOs
            Bx[ja,:,p0:p1,:] -= vtmp[:,p0:p1]
            Bx[ja,:,:,p0:p1] -= vtmp[:,p0:p1].transpose(0,2,1)
            tmp = numpy.einsum('xijn,ij->xn', v_e1_nj[:,p0:p1], dm[p0:p1])
            tmp += numpy.einsum('xijn,ji->xn', v_e1_nj[:,p0:p1], dm[:,p0:p1])
            phitmp = numpy.einsum('ln,xn->xl', wtmp, tmp)
            phi1[ja,:,ia] -= phitmp
            phi1[ia,:,ia] += phitmp

    # Xvec0 = L^-1 phi0; its derivative needs the L1 (dL/dR) contraction.
    Xvec0 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi0.ravel())
    Xvec0 = Xvec0.reshape(natm,nlm)
    L1 = ddcosmo_grad.make_L1(pcmobj, r_vdw, ylm_1sph, fi0)
    phi1 -= lib.einsum('aziljm,jm->azil', L1, Xvec0)
    Xvec1 = numpy.linalg.solve(L.reshape(natm*nlm,-1), phi1.reshape(-1,natm*nlm).T)
    Xvec1 = Xvec1.T.reshape(natm,3,natm,nlm)

    # psi1: derivative of the multipole projection, using grid response.
    psi1 = numpy.zeros((natm,3,natm,nlm))
    i1 = 0
    for ia, (coords, weight, weight1) in enumerate(rks_grad.grids_response_cc(grids)):
        i0, i1 = i1, i1 + weight.size
        ao = mol.eval_gto('GTOval_sph_deriv1', coords)
        aow = numpy.einsum('gi,g->gi', ao[0], weight)
        aopair1 = lib.einsum('xgi,gj->xgij', ao[1:], aow)
        aow = numpy.einsum('gi,zxg->zxgi', ao[0], weight1)
        aopair0 = lib.einsum('zxgi,gj->zxgij', aow, ao[0])

        den0 = numpy.einsum('zxgij,ij->zxg', aopair0, dm)
        den1 = numpy.empty((natm,3,weight.size))
        for ja in range(natm):
            shl0, shl1, p0, p1 = aoslices[ja]
            den1[ja] = numpy.einsum('xgij,ij->xg', aopair1[:,:,p0:p1], dm[p0:p1,:])
            den1[ja]+= numpy.einsum('xgij,ji->xg', aopair1[:,:,p0:p1], dm[:,p0:p1])

        fak_pol, leak_idx = cached_pol[mol.atom_symbol(ia)]
        fac_pol = ddcosmo._vstack_factor_fak_pol(fak_pol, lmax)
        scaled_weights = lib.einsum('azm,mn->azn', Xvec1[:,:,ia], fac_pol)
        scaled_weights *= weight
        aow = numpy.einsum('gi,azg->azgi', ao[0], scaled_weights)
        Bx -= numpy.einsum('gi,azgj->azij', ao[0], aow)

        tmp = numpy.einsum('mn,zxn->zxm', fac_pol, den1)
        psi1[: ,:,ia] -= numpy.einsum('mn,zxn->zxm', fac_pol, den0)
        psi1[ia,:,ia] -= tmp.sum(axis=0)
        for ja in range(natm):
            psi1[ja,:,ia] += tmp[ja]

        eta_nj = lib.einsum('mn,m->n', fac_pol, Xvec0[ia])
        Bx -= lib.einsum('n,zxnpq->zxpq', eta_nj, aopair0)
        vtmp = lib.einsum('n,xnpq->xpq', eta_nj, aopair1)
        Bx[ia] -= vtmp
        Bx[ia] -= vtmp.transpose(0,2,1)
        for ja in range(natm):
            shl0, shl1, q0, q1 = aoslices[ja]
            Bx[ja,:,q0:q1,:] += vtmp[:,q0:q1]
            Bx[ja,:,:,q0:q1] += vtmp[:,q0:q1].transpose(0,2,1)

    psi1 -= numpy.einsum('il,aziljm->azjm', LS0, L1)
    LS1 = numpy.linalg.solve(L.reshape(natm*nlm,-1).T, psi1.reshape(-1,natm*nlm).T)
    LS1 = LS1.T.reshape(natm,3,natm,nlm)

    # Final contraction of LS1 with the surface potential integrals, keeping
    # only the exposed cavity points.
    cav_coords = (atom_coords.reshape(natm,1,3)
                  + numpy.einsum('r,gx->rgx', r_vdw, coords_1sph))
    cav_coords = cav_coords[extern_point_idx]
    fakemol = gto.fakemol_for_charges(cav_coords)
    v_nj = df.incore.aux_e2(mol, fakemol, intor=int3c2e, aosym='s1', cintopt=cintopt)
    v_phi = numpy.zeros((natm, ngrid_1sph, nao, nao))
    v_phi[extern_point_idx] += v_nj.transpose(2,0,1)
    Bx += lib.einsum('azjx,n,xn,jn,jnpq->azpq', LS1, weights_1sph, ylm_1sph, ui, v_phi)
    return Bx
dx = 0.0001  # finite-difference half-step (Bohr) along z of the first H atom
mol0 = gto.M(atom='H 0 0 0; H 0 1 1.2; H 1. .1 0; H .5 .5 1', unit='B')
# mol1/mol2: reference geometry with the first H displaced by -dx / +dx,
# used for central-difference derivatives in the tests below.
mol1 = gto.M(atom='H 0 0 %g; H 0 1 1.2; H 1. .1 0; H .5 .5 1'%(-dx), unit='B')
mol2 = gto.M(atom='H 0 0 %g; H 0 1 1.2; H 1. .1 0; H .5 .5 1'%dx, unit='B')
dx = dx * 2  # total separation between mol1 and mol2 geometries
nao = mol0.nao_nr()
numpy.random.seed(1)  # deterministic fake density matrix shared by the tests
dm = numpy.random.random((nao,nao))
dm = dm + dm.T  # symmetrize so dm looks like a real density matrix
class KnownValues(unittest.TestCase):
    # Each test validates an analytical ddCOSMO derivative against a central
    # finite difference: the quantity is evaluated at mol1 (z-dx/2) and mol2
    # (z+dx/2) and (f2-f1)/dx is compared with the analytical [0,2] component
    # (atom 0, z direction).
    def test_e_psi1(self):
        '''Finite-difference check of ddcosmo_grad.make_e_psi1.'''
        def get_e_psi1(pcmobj):
            pcmobj.grids.build()
            mol = pcmobj.mol
            natm = mol.natm
            lmax = pcmobj.lmax
            r_vdw = ddcosmo.get_atomic_radii(pcmobj)
            coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
            ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, lmax, True))
            fi = ddcosmo.make_fi(pcmobj, r_vdw)
            ui = 1 - fi
            ui[ui<0] = 0
            nexposed = numpy.count_nonzero(ui==1)
            nbury = numpy.count_nonzero(ui==0)
            on_shell = numpy.count_nonzero(ui>0) - nexposed
            nlm = (lmax+1)**2
            Lmat = ddcosmo.make_L(pcmobj, r_vdw, ylm_1sph, fi)
            Lmat = Lmat.reshape(natm*nlm,-1)
            cached_pol = ddcosmo.cache_fake_multipoles(pcmobj.grids, r_vdw, lmax)
            phi = ddcosmo.make_phi(pcmobj, dm, r_vdw, ui, ylm_1sph)
            L_X = numpy.linalg.solve(Lmat, phi.ravel()).reshape(natm,-1)
            psi, vmat, L_S = \
                    ddcosmo.make_psi_vmat(pcmobj, dm, r_vdw, ui, ylm_1sph,
                                          cached_pol, L_X, Lmat)
            psi1 = ddcosmo_grad.make_e_psi1(pcmobj, dm, r_vdw, ui, ylm_1sph,
                                            cached_pol, L_X, Lmat)
            return L_X, psi, psi1

        pcmobj = ddcosmo.DDCOSMO(mol0)
        L_X, psi0, psi1 = get_e_psi1(pcmobj)
        pcmobj = ddcosmo.DDCOSMO(mol1)
        L_X1, psi = get_e_psi1(pcmobj)[:2]
        # e = psi . L_X with L_X frozen at the reference geometry
        e1 = numpy.einsum('jx,jx', psi, L_X)
        pcmobj = ddcosmo.DDCOSMO(mol2)
        L_X2, psi = get_e_psi1(pcmobj)[:2]
        e2 = numpy.einsum('jx,jx', psi, L_X)
        self.assertAlmostEqual(abs((e2-e1)/dx - psi1[0,2]).max(), 0, 7)

    def test_phi(self):
        '''Finite-difference check of ddcosmo_grad.make_phi1.'''
        # NOTE(review): the parameter name 'pcmojb' below is a typo; the body
        # uses the enclosing-scope 'pcmobj' via closure.  Behavior is correct
        # only because the caller rebinds 'pcmobj' before each call.
        def get_phi1(pcmojb):
            pcmobj.grids.build()
            mol = pcmobj.mol
            natm = mol.natm
            lmax = pcmobj.lmax
            r_vdw = pcmobj.get_atomic_radii()
            coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
            ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, lmax, True))
            fi = ddcosmo.make_fi(pcmobj, r_vdw)
            ui = 1 - fi
            ui[ui<0] = 0
            nexposed = numpy.count_nonzero(ui==1)
            nbury = numpy.count_nonzero(ui==0)
            on_shell = numpy.count_nonzero(ui>0) - nexposed
            nlm = (lmax+1)**2
            Lmat = ddcosmo.make_L(pcmobj, r_vdw, ylm_1sph, fi)
            Lmat = Lmat.reshape(natm*nlm,-1)
            cached_pol = ddcosmo.cache_fake_multipoles(pcmobj.grids, r_vdw, lmax)
            phi = ddcosmo.make_phi(pcmobj, dm, r_vdw, ui, ylm_1sph)
            L_X = numpy.linalg.solve(Lmat, phi.ravel()).reshape(natm,-1)
            psi, vmat, L_S = \
                    ddcosmo.make_psi_vmat(pcmobj, dm, r_vdw, ui, ylm_1sph,
                                          cached_pol, L_X, Lmat)
            phi1 = ddcosmo_grad.make_phi1(pcmobj, dm, r_vdw, ui, ylm_1sph)
            phi1 = numpy.einsum('izjx,jx->iz', phi1, L_S)
            return L_S, phi, phi1

        pcmobj = ddcosmo.DDCOSMO(mol0)
        L_S, phi0, phi1 = get_phi1(pcmobj)
        pcmobj = ddcosmo.DDCOSMO(mol1)
        L_S1, phi = get_phi1(pcmobj)[:2]
        # e = phi . L_S with the adjoint solution L_S frozen at mol0
        e1 = numpy.einsum('jx,jx', phi, L_S)
        pcmobj = ddcosmo.DDCOSMO(mol2)
        L_S2, phi = get_phi1(pcmobj)[:2]
        e2 = numpy.einsum('jx,jx', phi, L_S)
        self.assertAlmostEqual(abs((e2-e1)/dx - phi1[0,2]).max(), 0, 7)

    def test_fi(self):
        '''Finite-difference check of make_fi1 and the derived ui derivative.'''
        pcmobj = ddcosmo.DDCOSMO(mol0)
        fi1 = ddcosmo_grad.make_fi1(pcmobj, pcmobj.get_atomic_radii())
        ui1 = -fi1
        fi = ddcosmo.make_fi(pcmobj, pcmobj.get_atomic_radii())
        ui = 1 - fi
        # ui is clamped at 0, so its derivative vanishes on buried points
        ui1[:,:,ui<0] = 0
        pcmobj = ddcosmo.DDCOSMO(mol1)
        fi_1 = ddcosmo.make_fi(pcmobj, pcmobj.get_atomic_radii())
        ui_1 = 1 - fi_1
        ui_1[ui_1<0] = 0
        pcmobj = ddcosmo.DDCOSMO(mol2)
        fi_2 = ddcosmo.make_fi(pcmobj, pcmobj.get_atomic_radii())
        ui_2 = 1 - fi_2
        ui_2[ui_2<0] = 0
        self.assertAlmostEqual(abs((fi_2-fi_1)/dx - fi1[0,2]).max(), 0, 6)
        self.assertAlmostEqual(abs((ui_2-ui_1)/dx - ui1[0,2]).max(), 0, 6)

    def test_L1(self):
        '''Finite-difference check of make_L1 (derivative of the L matrix).'''
        pcmobj = ddcosmo.DDCOSMO(mol0)
        r_vdw = pcmobj.get_atomic_radii()
        coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcmobj.lebedev_order)
        ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, pcmobj.lmax, True))
        fi = ddcosmo.make_fi(pcmobj, r_vdw)
        L1 = ddcosmo_grad.make_L1(pcmobj, r_vdw, ylm_1sph, fi)

        pcmobj = ddcosmo.DDCOSMO(mol1)
        fi = ddcosmo.make_fi(pcmobj, r_vdw)
        L_1 = ddcosmo.make_L(pcmobj, r_vdw, ylm_1sph, fi)
        pcmobj = ddcosmo.DDCOSMO(mol2)
        fi = ddcosmo.make_fi(pcmobj, r_vdw)
        L_2 = ddcosmo.make_L(pcmobj, r_vdw, ylm_1sph, fi)
        self.assertAlmostEqual(abs((L_2-L_1)/dx - L1[0,2]).max(), 0, 7)

    def test_e_cosmo_grad(self):
        '''Finite-difference check of the total ddCOSMO energy gradient.'''
        pcmobj = ddcosmo.DDCOSMO(mol0)
        de = ddcosmo_grad.kernel(pcmobj, dm)
        pcmobj = ddcosmo.DDCOSMO(mol1)
        e1 = pcmobj.energy(dm)
        pcmobj = ddcosmo.DDCOSMO(mol2)
        e2 = pcmobj.energy(dm)
        self.assertAlmostEqual(abs((e2-e1)/dx - de[0,2]).max(), 0, 7)

    def test_scf_grad(self):
        '''SCF gradients with ddCOSMO: known values + finite differences.'''
        mf = ddcosmo.ddcosmo_for_scf(scf.RHF(mol0)).run()
        # solvent only
        de_cosmo = ddcosmo_grad.kernel(mf.with_solvent, mf.make_rdm1())
        self.assertAlmostEqual(lib.fp(de_cosmo), 0.000770107393352652, 6)
        # solvent + solute
        de = mf.nuc_grad_method().kernel()
        self.assertAlmostEqual(lib.fp(de), -0.1920179073822721, 6)

        dm1 = mf.make_rdm1()

        mf1 = ddcosmo.ddcosmo_for_scf(scf.RHF(mol1)).run()
        e1 = mf1.e_tot
        e1_cosmo = mf1.with_solvent.energy(dm1)

        mf2 = ddcosmo.ddcosmo_for_scf(scf.RHF(mol2)).run()
        e2 = mf2.e_tot
        e2_cosmo = mf2.with_solvent.energy(dm1)
        self.assertAlmostEqual(abs((e2-e1)/dx - de[0,2]).max(), 0, 7)
        self.assertAlmostEqual(abs((e2_cosmo-e1_cosmo)/dx - de_cosmo[0,2]).max(), 0, 7)

        # Gradient scanner must reproduce the direct calculation
        sc = mf.nuc_grad_method().as_scanner()
        e, g = sc('H 0 1 0; H 0 1 1.2; H 1. 0 0; H .5 .5 0')
        self.assertAlmostEqual(e, -0.8317337703056022, 8)
        self.assertAlmostEqual(lib.fp(g), 0.06804297145388238, 6)

        mol3 = gto.M(atom='H 0 1 0; H 0 1 1.2; H 1. 0 0; H .5 .5 0', unit='B')
        mf = ddcosmo.ddcosmo_for_scf(scf.RHF(mol3)).run()
        de = mf.nuc_grad_method().kernel()
        self.assertAlmostEqual(lib.fp(de), 0.06804297145388238, 6)

    def test_casci_grad(self):
        '''CASCI gradients with ddCOSMO: known values + finite differences.'''
        mf = scf.RHF(mol0).ddCOSMO().run()
        mc = solvent.ddCOSMO(mcscf.CASCI(mf, 2, 2))
        e, de = mc.nuc_grad_method().as_scanner()(mol0)
        self.assertAlmostEqual(e, -1.1844606066401635, 7)
        self.assertAlmostEqual(lib.fp(de), -0.18558925270492277, 5)

        mf = scf.RHF(mol1).run()
        mc1 = solvent.ddCOSMO(mcscf.CASCI(mf, 2, 2)).run()
        e1 = mc1.e_tot
        mf = scf.RHF(mol2).run()
        mc2 = solvent.ddCOSMO(mcscf.CASCI(mf, 2, 2)).run()
        e2 = mc2.e_tot
        self.assertAlmostEqual((e2-e1)/dx, de[0,2], 3)

        ## FIXME: seems working?
        ## frozen dm in CASCI
        #mf = scf.RHF(mol0).ddCOSMO().run()
        #mc = solvent.ddCOSMO(mcscf.CASCI(mf, 2, 2), dm=mf.make_rdm1())
        #e, de = mc.nuc_grad_method().as_scanner()(mol0)
        #self.assertAlmostEqual(e, -1.1845042661517311, 7)
        #self.assertAlmostEqual(lib.fp(de), -0.18563349186388467, 5)

        #mf = scf.RHF(mol1).run()
        #mc1 = solvent.ddCOSMO(mcscf.CASCI(mf, 2, 2), dm=mf.make_rdm1()).run()
        #e1 = mc1.e_tot
        #mf = scf.RHF(mol2).run()
        #mc2 = solvent.ddCOSMO(mcscf.CASCI(mf, 2, 2), dm=mf.make_rdm1()).run()
        #e2 = mc2.e_tot
        #self.assertAlmostEqual((e2-e1)/dx, de[0,2], 4)

    def test_casscf_grad(self):
        '''CASSCF gradients with ddCOSMO: known values + finite differences.'''
        mf = scf.RHF(mol0).ddCOSMO().run()
        mc = solvent.ddCOSMO(mcscf.CASSCF(mf, 2, 2)).set(conv_tol=1e-9)
        mc_g = mc.nuc_grad_method().as_scanner()
        e, de = mc_g(mol0)
        self.assertAlmostEqual(e, -1.1964048498155815, 7)
        self.assertAlmostEqual(lib.fp(de), -0.18331022006442843, 5)

        mf = scf.RHF(mol1).run()
        mc1 = solvent.ddCOSMO(mcscf.CASSCF(mf, 2, 2)).run()
        e1 = mc1.e_tot
        mf = scf.RHF(mol2).run()
        mc2 = solvent.ddCOSMO(mcscf.CASSCF(mf, 2, 2)).run()
        e2 = mc2.e_tot
        self.assertAlmostEqual((e2-e1)/dx, de[0,2], 3)

    def test_ccsd_grad(self):
        '''CCSD gradients with ddCOSMO: known values + finite differences.'''
        mf = scf.RHF(mol0).ddCOSMO().run()
        mycc = cc.CCSD(mf).ddCOSMO()
        e, de = mycc.nuc_grad_method().as_scanner()(mol0)
        self.assertAlmostEqual(e, -1.206178782599439, 7)
        self.assertAlmostEqual(lib.fp(de), -0.17959270231901459, 5)

        mf = scf.RHF(mol1).run()
        mycc1 = solvent.ddCOSMO(cc.CCSD(mf)).run()
        e1 = mycc1.e_tot
        mf = scf.RHF(mol2).run()
        mycc2 = solvent.ddCOSMO(cc.CCSD(mf)).run()
        e2 = mycc2.e_tot
        self.assertAlmostEqual((e2-e1)/dx, de[0,2], 4)

    def test_tda_grad(self):
        '''TDA excited-state gradients with equilibrium solvation.'''
        # Uses its own local geometries (different dx = 0.002 total)
        mol0 = gto.M(atom='H 0 0 0    ; H .5 .5 .1', unit='B', basis='321g')
        mol1 = gto.M(atom='H 0 0 -.001; H .5 .5 .1', unit='B', basis='321g')
        mol2 = gto.M(atom='H 0 0 0.001; H .5 .5 .1', unit='B', basis='321g')
        mf = scf.RHF(mol0).ddCOSMO().run()
        td = solvent.ddCOSMO(tdscf.TDA(mf)).run(equilibrium_solvation=True)
        ref = tda_grad(td, td.xy[0]) + mf.nuc_grad_method().kernel()

        e, de = td.nuc_grad_method().as_scanner(state=1)(mol0)
        de = td.nuc_grad_method().kernel()
        self.assertAlmostEqual(abs(ref - de).max(), 0, 12)

        td1 = mol1.RHF().ddCOSMO().run().TDA().ddCOSMO().run(equilibrium_solvation=True)
        td2 = mol2.RHF().ddCOSMO().run().TDA().ddCOSMO().run(equilibrium_solvation=True)
        e1 = td1.e_tot[0]
        e2 = td2.e_tot[0]
        self.assertAlmostEqual((e2-e1)/0.002, de[0,2], 5)

    def test_solvent_nuc(self):
        '''Finite-difference check of nuc_part1 and consistency of nuc_part
        with the solvent potential from _get_vind.'''
        def get_nuc(mol):
            pcm = ddcosmo.DDCOSMO(mol)
            pcm.lmax = 2
            pcm.eps = 0
            natm = mol.natm
            nao = mol.nao
            nlm = (pcm.lmax+1)**2
            r_vdw = ddcosmo.get_atomic_radii(pcm)
            fi = ddcosmo.make_fi(pcm, r_vdw)
            ui = 1 - fi
            ui[ui<0] = 0
            pcm.grids = grids = dft.gen_grid.Grids(mol).run(level=0)
            coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcm.lebedev_order)
            ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, pcm.lmax, True))
            cached_pol = ddcosmo.cache_fake_multipoles(grids, r_vdw, pcm.lmax)
            L = ddcosmo.make_L(pcm, r_vdw, ylm_1sph, fi)
            return nuc_part(pcm, r_vdw, ui, ylm_1sph, cached_pol, L)

        pcm = ddcosmo.DDCOSMO(mol0)
        pcm.lmax = 2
        pcm.eps = 0
        natm = mol0.natm
        nao = mol0.nao
        nlm = (pcm.lmax+1)**2
        r_vdw = ddcosmo.get_atomic_radii(pcm)
        fi = ddcosmo.make_fi(pcm, r_vdw)
        ui = 1 - fi
        ui[ui<0] = 0
        pcm.grids = grids = dft.gen_grid.Grids(mol0).run(level=0)
        coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcm.lebedev_order)
        ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, pcm.lmax, True))
        cached_pol = ddcosmo.cache_fake_multipoles(grids, r_vdw, pcm.lmax)
        L = ddcosmo.make_L(pcm, r_vdw, ylm_1sph, fi)
        dvmat = nuc_part1(pcm, r_vdw, ui, ylm_1sph, cached_pol, L)

        vmat1 = get_nuc(mol1)
        vmat2 = get_nuc(mol2)
        self.assertAlmostEqual(abs((vmat2-vmat1)/dx - dvmat[0,2]).max(), 0, 8)

        # nuc_part + B.x must reproduce the potential from _get_vind
        nao = mol0.nao
        numpy.random.seed(19)
        dm = numpy.random.random((nao,nao))
        vref = pcm._get_vind(dm)[1]
        vmat = 0.5 * get_nuc(mol0)
        vmat += pcm._B_dot_x(dm)
        self.assertAlmostEqual(abs(vmat-vref).max(), 0, 14)

        dm1 = numpy.random.random((2,nao,nao))
        de = _ddcosmo_tdscf_grad._grad_ne(pcm, dm1, r_vdw, ui, ylm_1sph, cached_pol, L)
        ref = numpy.einsum('azij,nij->naz', dvmat, dm1)
        self.assertAlmostEqual(abs(de - ref).max(), 0, 12)

    def test_B1(self):
        '''Finite-difference check of make_B1 and the contracted B1_dot_x /
        _grad_ee / _grad_nn helpers against ddcosmo_grad.kernel.'''
        def getB(mol):
            pcm = ddcosmo.DDCOSMO(mol)
            pcm.lmax = 2
            pcm.eps = 0
            natm = mol.natm
            nao = mol.nao
            nlm = (pcm.lmax+1)**2
            r_vdw = ddcosmo.get_atomic_radii(pcm)
            fi = ddcosmo.make_fi(pcm, r_vdw)
            ui = 1 - fi
            ui[ui<0] = 0
            pcm.grids = grids = dft.gen_grid.Grids(mol).run(level=0)
            coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcm.lebedev_order)
            ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, pcm.lmax, True))
            cached_pol = ddcosmo.cache_fake_multipoles(grids, r_vdw, pcm.lmax)
            L = ddcosmo.make_L(pcm, r_vdw, ylm_1sph, fi)
            return make_B(pcm, r_vdw, ui, ylm_1sph, cached_pol, L)

        pcm = ddcosmo.DDCOSMO(mol0)
        pcm.lmax = 2
        pcm.eps = 0
        natm = mol0.natm
        nao = mol0.nao
        nlm = (pcm.lmax+1)**2
        r_vdw = ddcosmo.get_atomic_radii(pcm)
        fi = ddcosmo.make_fi(pcm, r_vdw)
        ui = 1 - fi
        ui[ui<0] = 0
        pcm.grids = grids = dft.gen_grid.Grids(mol0).run(level=0)
        coords_1sph, weights_1sph = ddcosmo.make_grids_one_sphere(pcm.lebedev_order)
        ylm_1sph = numpy.vstack(sph.real_sph_vec(coords_1sph, pcm.lmax, True))
        cached_pol = ddcosmo.cache_fake_multipoles(grids, r_vdw, pcm.lmax)
        L = ddcosmo.make_L(pcm, r_vdw, ylm_1sph, fi)
        dB = make_B1(pcm, r_vdw, ui, ylm_1sph, cached_pol, L)

        B1 = getB(mol1)
        B2 = getB(mol2)
        self.assertAlmostEqual(abs((B2-B1)/dx - dB[0,2]).max(), 0, 8)

        nao = mol0.nao
        numpy.random.seed(1)
        dm1 = numpy.random.random((2,nao,nao))
        dm2 = numpy.random.random((2,nao,nao))
        dm = dm1[0]
        # B1_dot_x must match contracting the full dB tensor
        ref = numpy.einsum('azpqrs,npq->nazrs', dB, dm1)
        v = B1_dot_x(pcm, dm, r_vdw, ui, ylm_1sph, cached_pol, L)
        self.assertAlmostEqual(abs(v-ref[0]).max(), 0, 12)

        de = _ddcosmo_tdscf_grad._grad_ee(pcm, dm1, dm2, r_vdw, ui, ylm_1sph, cached_pol, L)
        ref = numpy.einsum('nazij,nij->naz', ref, dm2)
        self.assertAlmostEqual(abs(de - ref).max(), 0, 12)

        numpy.random.seed(1)
        dm = numpy.random.random((nao,nao))
        dm = dm + dm.T
        ref = ddcosmo_grad.kernel(pcm, dm)
        # Assemble the same gradient from the nn/ne/ee pieces with the
        # dielectric scaling factor f_epsilon
        dielectric = pcm.eps
        if dielectric > 0:
            f_epsilon = (dielectric-1.)/dielectric
        else:
            f_epsilon = 1
        de = _ddcosmo_tdscf_grad._grad_nn(pcm, r_vdw, ui, ylm_1sph, cached_pol, L)
        de+= _ddcosmo_tdscf_grad._grad_ne(pcm, dm, r_vdw, ui, ylm_1sph, cached_pol, L)
        de+= .5*_ddcosmo_tdscf_grad._grad_ee(pcm, dm, dm, r_vdw, ui, ylm_1sph, cached_pol, L)
        de *= .5 * f_epsilon
        self.assertAlmostEqual(abs(de-ref).max(), 0, 12)
if __name__ == "__main__":
print("Full Tests for ddcosmo gradients")
unittest.main()
|
gkc1000/pyscf
|
pyscf/solvent/test/test_ddcosmo_grad.py
|
Python
|
apache-2.0
| 41,443
|
[
"PySCF"
] |
594204d0dfe521ee54336beb9417bf26dc6b1e0cb44ed9f7ae6b418f3b99d018
|
''' Significant lifting from https://jmetzen.github.io/2015-11-27/vae.html '''
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn
import random
import matplotlib.pyplot as plt
import re, string
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
import pickle as pkl
import itertools
import ctc_loss
import IPython
import os
# Vocabulary size (two slots reserved for the <UNK> and end tokens).
n = 50000 - 2

def map_lambda():
	"""Default factory for the word->index defaultdict: out-of-vocabulary
	words map to index n+1 (the <UNK> slot).  Kept as a named module-level
	function so the defaultdict stays picklable."""
	return n + 1
def rev_map_lambda():
	"""Default factory for the index->word defaultdict: unknown indices
	render as the literal "<UNK>" token.  Named (not a lambda) so the
	defaultdict remains picklable."""
	return "<UNK>"
def load_text(n,num_samples=None):
	# Load the preprocessed auxiliary training tensors and vocabulary maps
	# from disk (mapaoh.pkl / rev_mapaoh.pkl / *aux.npy), filter the samples
	# by the 'caux' choice mask, and return (X, y, mask, rev_map).
	#
	# NOTE(review): the `n` parameter is unused on the active code path, and
	# the `num_samples` branch below references `word_list`, which only
	# exists inside the commented-out preprocessing code -- passing a
	# non-None num_samples would raise NameError.  The condition also looks
	# inverted (`is not None` where `is None` was presumably intended);
	# confirm before reviving that path.
	# # fname = 'Oxford_English_Dictionary.txt'
	# # txt = []
	# # with open(fname,'rb') as f:
	# # 	txt = f.readlines()
	# # txt = [x.decode('utf-8').strip() for x in txt]
	# # txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
	# # List of words
	# # word_list = [x.split(' ', 1)[0].strip() for x in txt]
	# # # List of definitions
	# # def_list = [x.split(' ', 1)[1].strip()for x in txt]
	# with open('./training_data/training_data.pkl','rb') as raw:
	# 	word_list,dl=pkl.load(raw)
	# 	def_list=[]
	# # def_list=[' '.join(defi) for defi in def_list]
	# i=0
	# # words={}
	# while i<len( dl):
	# 	defi=dl[i]
	# 	if len(defi)>0:
	# 		def_list+=[' '.join(defi)]
	# 		i+=1
	# 	else:
	# 		dl.pop(i)
	# 		word_list.pop(i)
	# # for w,d in zip(word_list,def_list):
	# # 	if w not in words:
	# # 		words[w]=[]
	# # 	words[w].append(d)
	# # word_list=[]
	# # def_list=[]
	# # for word in words:
	# # 	word_list.append(word)
	# # 	# def_list.append(random.choice(words[word]))
	# # 	def_list.append(words[word][0])
	# maxlen=0
	# minlen=100
	# for defi in def_list:
	# 	minlen=min(minlen,len(defi.split()))
	# 	maxlen=max(maxlen,len(defi.split()))
	# print(minlen)
	# print(maxlen)
	# Fixed maximum definition length used by the saved tensors.
	maxlen=30
	# # Initialize the "CountVectorizer" object, which is scikit-learn's
	# # bag of words tool.
	# vectorizer = CountVectorizer(analyzer = "word",   \
	# 							 tokenizer = None,    \
	# 							 preprocessor = None, \
	# 							 stop_words = None,   \
	# 							 max_features = None, \
	# 							 token_pattern='\\b\\w+\\b') # Keep single character words
	# _map,rev_map=get_one_hot_map(word_list,def_list,n)
	# pkl.dump(_map,open('mapaoh.pkl','wb'))
	# pkl.dump(rev_map,open('rev_mapaoh.pkl','wb'))
	# Load the previously pickled vocabulary maps (defaultdicts built with
	# map_lambda / rev_map_lambda).
	_map=pkl.load(open('mapaoh.pkl','rb'))
	rev_map=pkl.load(open('rev_mapaoh.pkl','rb'))
	# exit()
	if num_samples is not None:
		num_samples=len(word_list)
	# X = (36665, 56210)
	# X = map_one_hot(word_list[:num_samples],_map,1,n)
	# # y = (36665, 56210)
	# # print _map
	# y,mask = map_one_hot(def_list[:num_samples],_map,maxlen,n)
	# np.save('Xaoh',X)
	# np.save('yaoh',y)
	# np.save('maskaoh',mask)
	# exit()
	# X=np.load('Xaohex.npy','r')
	# y=np.load('yaohex.npy','r')
	# mask=np.load('maskaohex.npy','r')
	# Memory-map the saved auxiliary arrays and flatten their outer batching.
	mask=np.load('maskaux.npy','r')
	mask=np.concatenate(mask)
	X=np.load('Xaux.npy','r')
	X=np.concatenate(X)
	y=np.load('yaux.npy','r')
	y=np.concatenate(y)
	# Boolean selection mask: keep only samples with a nonzero 'choice' flag.
	auxchoices=np.load('caux.npy','r')
	auxchoices=np.concatenate(auxchoices)
	auxchoices=auxchoices.flatten()
	auxchoices=auxchoices!=np.zeros_like(auxchoices)
	mask=mask[auxchoices]
	X=X[auxchoices].flatten()
	y=y[auxchoices]
	print (np.max(y))
	return X, y, mask,rev_map
def get_one_hot_map(to_def,corpus,n):
	# Build forward (word -> index) and reverse (index -> word) vocabulary
	# maps over the n most frequent tokens of to_def + corpus.
	#
	# NOTE(review): the returned defaultdicts use map_lambda/rev_map_lambda,
	# whose default values depend on the MODULE-LEVEL n (49998), not on this
	# function's `n` parameter -- they only agree when the caller passes the
	# same value.  Also relies on the module-level global `zero_end_tok`
	# (defined elsewhere in the file).
	# words={}
	# for line in to_def:
	# 	if line:
	# 		words[line.split()[0]]=1
	# counts=defaultdict(int)
	# uniq=defaultdict(int)
	# for line in corpus:
	# 	for word in line.split():
	# 		if word not in words:
	# 			counts[word]+=1
	# words=list(words.keys())
	words=[]
	counts=defaultdict(int)
	uniq=defaultdict(int)
	# Count token frequencies.  `words` is empty here, so the membership
	# test never excludes anything -- every token is counted.
	for line in to_def+corpus:
		for word in line.split():
			if word not in words:
				counts[word]+=1
	_map=defaultdict(map_lambda)
	rev_map=defaultdict(rev_map_lambda)
	# words=words[:25000]
	# uniq: histogram of frequency values (how many words occur i times).
	for i in counts.values():
		uniq[i]+=1
	print (len(words))
	# random.shuffle(words)
	# Keep the n most frequent tokens, in descending frequency order.
	words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)]
	print (len(words))
	i=0
	# random.shuffle(words)
	# for num_bits in range(binary_dim):
	# 	for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1):
	# 		bitmap=np.zeros(binary_dim)
	# 		bitmap[np.array(bit_config)]=1
	# 		num=bitmap*(2** np.arange(binary_dim ))
	# 		num=np.sum(num)
	# 		num=int(num)
	# 		word=words[i]
	# 		_map[word]=num
	# 		rev_map[num]=word
	# 		i+=1
	# 		if i>=len(words):
	# 			break
	# 	if i>=len(words):
	# 		break
	# 	i+=1
	# Assign indices 1..len(words); 0 and the top slots are reserved below.
	for word in words:
		i+=1
		_map[word]=i
		rev_map[i]=word
	rev_map[n+1]='<UNK>'
	# Index 0 doubles as either the end token '.' or a 'Start' marker
	# depending on the module-level zero_end_tok flag; 2 is the end token in
	# the latter scheme (note this overwrites the word previously at index 2).
	if zero_end_tok:
		rev_map[0]='.'
	else:
		rev_map[0]='Start'
		rev_map[2]='End'
	print (list(reversed(sorted(uniq.items()))))
	print (len(list(uniq.items())))
	print (len(rev_map.keys()))
	print(len(_map.keys()))
	print ('heyo')
	# print rev_map
	return _map,rev_map
def map_word_emb(corpus,_map):
	### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY)
	# Map each word of `corpus` to its vocabulary index via `_map`
	# (lower-cased), optionally also drawing a random negative sample per
	# word when the module-level global `get_rand_vec` is truthy.
	#
	# NOTE(review): Python-2 print statement below; also `_map` is a
	# defaultdict, so the `w not in _map` check counts misses but the
	# subsequent `_map[w]` lookup INSERTS the default (n+1) into the map.
	# The negative-sampling loop compares `mapped_rand==word` against the
	# original-case `word` while keys were looked up lower-cased -- likely
	# intended to be `w`; confirm.
	rtn=[]
	rtn2=[]
	num_failed=0
	num_counted=0
	for word in corpus:
		w=word.lower()
		num_counted+=1
		if w not in _map:
			num_failed+=1
		mapped=_map[w]
		rtn.append(mapped)
		if get_rand_vec:
			# Draw a random vocabulary entry different from the target word.
			mapped_rand=random.choice(list(_map.keys()))
			while mapped_rand==word:
				mapped_rand=random.choice(list(_map.keys()))
			mapped_rand=_map[mapped_rand]
			rtn2.append(mapped_rand)
	print 'fuck',num_failed/float(num_counted)
	if get_rand_vec:
		return np.array(rtn),np.array(rtn2)
	return np.array(rtn)
def map_one_hot(corpus,_map,maxlen,n):
	# Encode `corpus` using vocabulary `_map`.
	# maxlen==1: each entry is a single word; returns either a one-hot
	# matrix (not form2) or a vector of integer indices (form2).
	# maxlen>1: each entry is a sentence; returns (rtn, mask) where rtn is
	# (len(corpus), maxlen+2) of indices (or float32 when form2) with a
	# start slot at column 0 and an end token appended, and mask flags the
	# populated positions.
	#
	# NOTE(review): depends on module-level globals `form2` and
	# `zero_end_tok`.  The hard-coded 75001 looks like n+1 for a fixed
	# n=75000 vocabulary and is inconsistent with the `n` parameter --
	# confirm.  In the form2/maxlen==1 branch, `rtn` is 1-D but empty lines
	# execute `rtn[l,-1]=1`, which would raise IndexError.
	if maxlen==1:
		if not form2:
			total_not=0
			rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
			for l,line in enumerate(corpus):
				if len(line)==0:
					rtn[l,-1]=1
				else:
					mapped=_map[line]
					if mapped==75001:
						total_not+=1
					rtn[l,mapped]=1
			print (total_not,len(corpus))
			return rtn
		else:
			total_not=0
			rtn=np.zeros([len(corpus)],dtype=np.float32)
			for l,line in enumerate(corpus):
				if len(line)==0:
					rtn[l,-1]=1
				else:
					mapped=_map[line]
					if mapped==75001:
						total_not+=1
					rtn[l]=mapped
			print (total_not,len(corpus))
			return rtn
	else:
		if form2:
			rtn=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
		else:
			rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
		print (rtn.shape)
		mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
		print (mask.shape)
		# Position 1 is always considered valid (first real token slot).
		mask[:,1]=1.0
		totes=0
		nopes=0
		wtf=0
		for l,_line in enumerate(corpus):
			x=0
			line=_line.split()
			# Truncate to maxlen-1 tokens, leaving room for the end token.
			for i in range(min(len(line),maxlen-1)):
				# if line[i] not in _map:
				# 	nopes+=1
				mapped=_map[line[i]]
				rtn[l,i+1]=mapped
				if mapped==n+1:
					wtf+=1
				mask[l,i+1]=1.0
				totes+=1
				x=i+1
			# End token: 0 when zero_end_tok, otherwise the reserved n+2.
			to_app=n+2
			if zero_end_tok:
				to_app=0
			rtn[l,x+1]=to_app
			mask[l,x+1]=1.0
		print (nopes,totes,wtf)
		return rtn,mask
def xavier_init(fan_in, fan_out, constant=1e-4):
	"""Return a (fan_in, fan_out) float32 tensor sampled uniformly from the
	Xavier/Glorot interval [-b, b] with b = constant*sqrt(6/(fan_in+fan_out)).

	Note the default `constant` of 1e-4 scales the classic Xavier bound way
	down; pass constant=1 for the textbook initialization.
	"""
	bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
	return tf.random_uniform((fan_in, fan_out),
	                         minval=-bound, maxval=bound,
	                         dtype=tf.float32)
class VariationalAutoencoder(object):
""" Variation Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow.
This implementation uses probabilistic encoders and decoders using Gaussian
distributions and realized by multi-layer perceptrons. The VAE can be learned
end-to-end.
See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details.
"""
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
learning_rate=0.001, batch_size=100,generative=False,ctrain=False,test=False,global_step=None):
self.network_architecture = network_architecture
self.transfer_fct = transfer_fct
self.learning_rate = learning_rate
print self.learning_rate
self.batch_size = batch_size
if global_step is None:
global_step=tf.Variable(0,trainiable=False)
self.global_step=global_step
# tf Graph input
self.n_words=network_architecture['n_input']
if not form2:
self.x = tf.placeholder(tf.float32, [None,self.n_words],name='x_in')
else:
self.x = tf.placeholder(tf.int32, [None],name='x_in')
self.intype=type(self.x)
if not form2:
self.caption_placeholder = tf.placeholder(tf.int32, [self.batch_size,network_architecture["maxlen"]],name='caption_placeholder')
else:
self.caption_placeholder = tf.placeholder(tf.int32, [self.batch_size, network_architecture["maxlen"]],name='caption_placeholder')
# print self.caption_placeholder.shape
self.mask=tf.placeholder(tf.float32, [None, network_architecture["maxlen"]],name='mask')
self.timestep=tf.placeholder(tf.float32,[],name='timestep')
# Create autoencoder network
to_restore=None
with tf.device('/cpu:0'):
print network_architecture['n_input']
self.embw=tf.Variable(xavier_init(network_architecture['n_input'],network_architecture['n_z']),name='embw')
self.embb=tf.Variable(tf.zeros([network_architecture['n_z']]),name='embb')
if not generative:
self._create_network()
# Define loss function based variational upper-bound and
# corresponding optimizer
to_restore=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self._create_loss_optimizer()
self.test=test
else:
self._build_gen()
# Initializing the tensor flow variables
init = tf.global_variables_initializer()
# Launch the session
self.sess = tf.InteractiveSession()
if embeddings_trainable:
self.saver = tf.train.Saver(max_to_keep=100)
saved_path=tf.train.latest_checkpoint(model_path)
else:
self.saver= tf.train.Saver(max_to_keep=100)
mod_path=model_path
if use_ctc:
mod_path=mod_path[:-3]
saved_path=tf.train.latest_checkpoint(mod_path.replace('defdef','embtransfer'))
self.sess.run(init)
if ctrain:
self.saver.restore(self.sess, saved_path)
self.saver=tf.train.Saver(max_to_keep=100)
def _create_network(self):
# Initialize autoencode network weights and biases
network_weights = self._initialize_weights(**self.network_architecture)
start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
self.network_weights=network_weights
seqlen=tf.cast(tf.reduce_sum(self.mask,reduction_indices=-1),tf.int32)
self.embedded_input_KLD_loss=tf.constant(0.0)
self.input_embedding_KLD_loss=tf.constant(0.0)
# def train_encoder():
embedded_input,self.embedded_input_KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],tf.reshape(self.caption_placeholder,[self.batch_size*self.network_architecture['maxlen']]),logit=True)
# print 'eshape',embedded_input.shape
embedded_input=tf.reshape(embedded_input,[self.batch_size,self.network_architecture['maxlen'],self.network_architecture['n_lstm_input']])
# print embedded_input.shape
if not vanilla:
self.embedded_input_KLD_loss=tf.reshape(embedded_input_KLD_loss,[-1,self.network_architecture['maxlen']])[:,1:]
encoder_input=embedded_input[:,1:,:]
cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input'])
if lstm_stack>1:
cell=tf.contrib.rnn.MultiRNNCell([cell]*lstm_stack)
if not use_bdlstm:
encoder_outs,encoder_states=rnn.dynamic_rnn(cell,encoder_input,sequence_length=seqlen-1,dtype=tf.float32,time_major=False)
else:
backward_cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input'])
if lstm_stack>1:
backward_cell=tf.contrib.rnn.MultiRNNCell([backward_cell]*lstm_stack)
encoder_outs,encoder_states=rnn.bidirectional_dynamic_rnn(cell,backward_cell,encoder_input,sequence_length=seqlen-1,dtype=tf.float32,time_major=False)
ix_range=tf.range(0,self.batch_size,1)
ixs=tf.expand_dims(ix_range,-1)
to_cat=tf.expand_dims(seqlen-2,-1)
gather_inds=tf.concat([ixs,to_cat],axis=-1)
print encoder_outs
outs=tf.gather_nd(encoder_outs,gather_inds)
# outs=tf.nn.dropout(outs,.75)
self.deb=tf.gather_nd(self.caption_placeholder[:,1:],gather_inds)
# print outs.shape
input_embedding,self.input_embedding_KLD_loss=self._get_middle_embedding([network_weights['middle_encoding'],network_weights['biases_middle_encoding']],network_weights['middle_encoding'],outs,logit=True)
# return input_embedding
# input_embedding=tf.nn.l2_normalize(input_embedding,dim=-1)
self.other_loss=tf.constant(0,dtype=tf.float32)
KLD_penalty=(tf.cast(self.timestep,tf.float32)/1.0)*1e-3
cos_penalty=tf.maximum(-0.1,(tf.cast(self.timestep,tf.float32)/(5.0)))*1e-3
self.input_KLD_loss=tf.constant(0.0)
# def train_decoder():
if form3:
input_embedding,self.input_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['variational_encoding'])
self.input_KLD_loss=tf.reduce_mean(self.input_KLD_loss)*KLD_penalty#\*tf.constant(0.0,dtype=tf.float32)
# normed_embedding= tf.nn.l2_normalize(self.mid_var, dim=-1)
# normed_target=tf.nn.l2_normalize(self.word_var,dim=-1)
# cos_sim=(tf.reduce_sum(tf.multiply(normed_embedding,normed_target),axis=-1))
# # # self.exp_loss=tf.reduce_mean((-cos_sim))
# # # self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size)
# self.other_loss += tf.reduce_mean(1-(cos_sim))*cos_penalty
# # other_loss+=tf.reduce_mean(tf.reduce_sum(tf.square(_x-input_embedding),axis=-1))*cos_penalty
# _x=tf.concat([input_embedding,_x],axis=-1)
# tempe=tf.Variable(xavier_init(self.network_architecture['n_lstm_input']*2,self.network_architecture['n_lstm_input']),name='emb_cat')
# tempb=tf.Variable(tf.zeros([self.network_architecture['n_lstm_input']]),name='emb_cat_b')
# _x=tf.matmul(_x,tempe)+tempb
# input_embedding=_x
# input_embedding=tf.cond(tf.equal(self.timestep%5,0),train_decoder,train_encoder)
# Use recognition network to determine mean and
# (log) variance of Gaussian distribution in latent
# space
# if not same_embedding:
# input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'])
# else:
# input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'])
# if not embeddings_trainable:
# input_embedding=tf.stop_gradient(input_embedding)
# embed2decoder=tf.Variable(xavier_init(self.network_architecture['n_z_m_2'],self.network_architecture['n_lstm_input']),name='decoder_embedding_weight')
# embed2decoder_bias=tf.Variable(tf.zeros(self.network_architecture['n_lstm_input']),name='decoder_embedding_bias')
state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
# input_embedding=tf.matmul(input_embedding,embed2decoder)+embed2decoder_bias
loss = 0
self.debug=0
probs=[]
with tf.variable_scope("RNN"):
for i in range(self.network_architecture['maxlen']):
if i > 0:
# current_embedding = tf.nn.embedding_lookup(self.word_embedding, caption_placeholder[:,i-1]) + self.embedding_bias
if form4:
current_embedding,KLD_loss=input_embedding,0
elif form2:
current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1],logit=True)
else:
current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1])
loss+=tf.reduce_sum(KLD_loss*self.mask[:,i])*KLD_penalty
else:
current_embedding = input_embedding
if i > 0:
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(current_embedding, state)
if i > 0:
if not form2:
labels = tf.expand_dims(self.caption_placeholder[:, i], 1)
ix_range=tf.range(0, self.batch_size, 1)
ixs = tf.expand_dims(ix_range, 1)
concat = tf.concat([ixs, labels],1)
onehot = tf.sparse_to_dense(
concat, tf.stack([self.batch_size, self.n_words]), 1.0, 0.0)
else:
onehot=self.caption_placeholder[:,i]
logit = tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
if not use_ctc:
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=onehot)
xentropy = xentropy * self.mask[:,i]
xentropy=tf.reduce_sum(xentropy)
self.debug+=xentropy
loss += xentropy
else:
probs.append(tf.expand_dims(tf.nn.sigmoid(logit),1))
self.debug=[self.input_KLD_loss,KLD_penalty]
if not use_ctc:
loss_ctc=0
# self.debug=other_loss
# self.debug=[input_KLD_loss,embedded_input_KLD_loss,input_embedding_KLD_loss]
else:
probs=tf.concat(probs,axis=1)
probs=ctc_loss.get_output_probabilities(probs,self.caption_placeholder[:,1:,:])
loss_ctc=ctc_loss.loss(probs,self.caption_placeholder[:,1:,:],self.network_architecture['maxlen']-2,self.batch_size,seqlen-1)
self.debug=loss_ctc
#
loss = (loss / tf.reduce_sum(self.mask[:,1:]))+loss_ctc+self.input_KLD_loss
print 'makin loss'
self.loss=loss
	def _initialize_weights(self, n_lstm_input, maxlen,
							n_input, n_z, n_z_m,n_z_m_2):
		"""Create every TF variable used by the model and build the LSTM cell.

		Returns a dict of weight dicts keyed by sub-network name
		('input_meaning', 'variational_encoding', 'middle_encoding',
		'embmap', 'LSTM', ...).  Also assigns self.lstm and
		self.untrainable_variables as side effects.  Behavior depends on the
		module-level flags form3, same_embedding, mid_vae and lstm_stack.
		"""
		all_weights = dict()
		# NOTE(review): both branches assign the same value, so form3 has no
		# effect here -- confirm whether a different n_in was intended.
		if form3:
			n_in=n_input
		else:
			n_in=n_input
		embeddings_trainable=True
		if not same_embedding:
			# Affine map from the latent sample z into LSTM input space.
			all_weights['input_meaning'] = {
				'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight',trainable=embeddings_trainable),
				'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias',trainable=embeddings_trainable)}
			# if not vanilla:
			all_weights['biases_variational_encoding'] = {
				'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb',trainable=embeddings_trainable),
				'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_log_sigmab',trainable=embeddings_trainable)}
			# Embedding-style matrices kept on CPU for tf.nn.embedding_lookup.
			with tf.device('/cpu:0'):
				om=tf.Variable(xavier_init(n_in, n_z),name='out_mean',trainable=embeddings_trainable)
				ols=tf.Variable(xavier_init(n_in, n_z),name='out_log_sigma',trainable=embeddings_trainable)
			all_weights['variational_encoding'] = {
				'out_mean': om,
				'out_log_sigma': ols,
				'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='in_affine_weight'),
				'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='in_affine_bias')
				}
			print all_weights['variational_encoding']['out_mean']
			# else:
			# 	all_weights['biases_variational_encoding'] = {
			# 	'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb',trainable=embeddings_trainable)}
			# 	all_weights['variational_encoding'] = {
			# 	'out_mean': tf.Variable(xavier_init(n_in, n_z),name='out_mean',trainable=embeddings_trainable),
			# 	'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='in_affine_weight'),
			# 	'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='in_affine_bias')}
			# Collected so callers can exclude these from optimization.
			self.untrainable_variables=all_weights['input_meaning'].values()+all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
			if mid_vae:
				# Full VAE parameters (mean + log-sigma) for the middle encoding.
				all_weights['biases_middle_encoding'] = {
					'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb'),
					'out_log_sigma': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_log_sigmab')}
				all_weights['middle_encoding'] = {
					'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'),
					'out_log_sigma': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_log_sigma'),
					'affine_weight': tf.Variable(xavier_init(n_z_m, n_lstm_input),name='mid_affine_weight'),
					'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='mid_affine_bias')}
				all_weights['embmap']={
					'out_mean': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_mean'),
					'out_log_sigma': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_log_sigma')
				}
				all_weights['embmap_biases']={
					'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_meanb',trainable=embeddings_trainable),
					'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_log_sigmab',trainable=embeddings_trainable)
				}
			else:
				# Deterministic variant: mean only, no log-sigma parameters.
				all_weights['biases_middle_encoding'] = {
					'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb')}
				all_weights['middle_encoding'] = {
					'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'),
					'affine_weight': tf.Variable(xavier_init(n_z_m, n_lstm_input),name='mid_affine_weight'),
					'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='mid_affine_bias')}
				all_weights['embmap']={
					'out_mean': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_mean')
				}
				all_weights['embmap_biases']={
					'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_meanb',trainable=embeddings_trainable)
				}
		self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
		if lstm_stack>1:
			# NOTE(review): [cell]*n shares one cell object across layers in
			# TF >= 1.1 MultiRNNCell -- verify intended for this TF version.
			self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
		all_weights['LSTM'] = {
			'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
			'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
			'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'),
			'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'),
			'lstm': self.lstm}
		return all_weights
	def _get_input_embedding(self, ve_weights, aff_weights):
		"""Embed the input placeholder self.x into LSTM input space.

		ve_weights: [weights_dict, biases_dict] for the variational encoder.
		aff_weights: dict providing 'affine_weight'/'affine_bias'.
		Returns (embedding, vae_loss); also stores the latent sample as
		self.word_var.  Path chosen by the module-level form3 flag.
		"""
		if not form3:
			# Dense path: self.x fed directly to the sampler (matmul inside).
			z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],self.x)
		else:
			# Lookup path: self.x holds integer ids, use embedding lookup.
			x=self.x
			# with tf.device('/cpu:0'):
			# 	x=tf.nn.embedding_lookup(self.embw,self.x)
			# 	x+=self.embb
			z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x,lookup=True)#,name='input')
		self.word_var=z
		embedding=tf.matmul(z,aff_weights['affine_weight'])+aff_weights['affine_bias']
		return embedding,vae_loss
	def _get_middle_embedding(self, ve_weights, lstm_weights, x,logit=False):
		"""Embed an intermediate activation (or token ids) for the LSTM.

		logit=True: x is a dense activation, sampled via _vae_sample_mid.
		Otherwise form2 selects between lookup sampling and one-hot sampling.
		Returns (embedding, vae_loss); latent sample cached as self.mid_var.
		"""
		if logit:
			z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x)
		else:
			if not form2:
				# x is integer ids; embedding-lookup path.
				z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x, True)
			else:
				# One-hot x before sampling; the one-hot tensor is also kept
				# in the module-level list for debugging/inspection.
				z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
				all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
		# print z.shape
		self.mid_var=z
		embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
		return embedding,vae_loss
	def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
		"""Embed one caption word (ids or one-hot) into LSTM input space.

		ve_weights: [weights_dict, biases_dict] for the variational encoder.
		lstm_weights: dict with 'affine_weight'/'affine_bias'.
		Returns (embedding, vae_loss).
		"""
		if form3:
			# with tf.device('/cpu:0'):
			# 	x=tf.nn.embedding_lookup(self.embw,x)
			# 	x+=self.embb
			pass
		if logit:
			# x holds integer ids; sample via embedding lookup.
			z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x,lookup=True)
		else:
			if not form2:
				z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
			else:
				# One-hot x before sampling; tensor also recorded for debugging.
				z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
				all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
		embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
		return embedding,vae_loss
	def _vae_sample(self, weights, biases, x, lookup=False):
		"""Draw a latent sample z (reparameterization trick) for input x.

		lookup=False: x is dense; mu/logvar come from matmuls.
		lookup=True: x is integer ids; mu/logvar come from embedding lookups
		(kept on CPU).  When the module-level `vanilla` flag is set, the model
		is deterministic: z = mu and KLD = 0.0.
		Returns (z, KLD) where KLD is the per-example KL divergence to N(0, I).
		"""
		#TODO: consider adding a linear transform layer+relu or softplus here first
		if not lookup:
			mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
			if not vanilla:
				logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
		else:
			with tf.device('/cpu:0'):
				mu=tf.nn.embedding_lookup(weights['out_mean'],x)
			mu+=biases['out_mean']
			if not vanilla:
				with tf.device('/cpu:0'):
					logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)
				logvar+=biases['out_log_sigma']
		if not vanilla:
			# z = mu + sigma * epsilon, epsilon ~ N(0, I).
			epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
			std=tf.exp(.5*logvar)
			z=mu+tf.multiply(std,epsilon)
		else:
			z=mu
		KLD=0.0
		if not vanilla:
			KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
		# print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
		return z,KLD
def _vae_sample_mid(self, weights, biases, x, lookup=False,name=''):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if mid_vae:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
with tf.device('/cpu:0'):
mu=tf.nn.embedding_lookup(weights['out_mean'],x)
mu+=biases['out_mean']
if mid_vae:
with tf.device('/cpu:0'):
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)
logvar+=biases['out_log_sigma']
if mid_vae:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon'+name)
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if mid_vae:
print 'stop fucking sampling',mid_vae
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
# print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
return z,KLD
	def _create_loss_optimizer(self):
		"""Attach the optimizer op for self.loss as self.optimizer.

		clip_grad (module-level flag) selects RMSProp with global-norm
		gradient clipping at 0.1; otherwise plain Adam is used.
		"""
		if clip_grad:
			opt_func = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)
			tvars = tf.trainable_variables()
			grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), .1)
			self.optimizer = opt_func.apply_gradients(zip(grads, tvars))
		else:
			self.optimizer = \
				tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
	def _create_loss_test(self):
		"""Build a numeric gradient-check op for the loss wrt self.x."""
		self.test_op = \
			tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[1],extra_feed_dict={})
	def partial_fit(self, X,y,mask,testify=False,timestep=0):
		"""Train model based on mini-batch of input data.
		Return cost of mini-batch.

		X: batch of inputs fed to self.x.
		y: caption batch fed to self.caption_placeholder.
		mask: per-timestep loss mask fed to self.mask.
		testify: when self.test is also set, runs a gradient check and exits
		the process instead of training.
		Returns (cost, debug_value) where debug_value is whatever self.debug
		currently evaluates to.
		"""
		if self.test and testify:
			print tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[self.batch_size],extra_feed_dict={self.caption_placeholder: y, self.mask: mask})
			exit()
		else:
			opt, cost,shit = self.sess.run((self.optimizer, self.loss,self.debug),
				feed_dict={self.x: X, self.caption_placeholder: y, self.mask: mask,self.timestep:timestep})
			# print shit
			# print deb
		# exit()
		return cost,shit
	def _build_gen(self):
		"""Build the greedy-decoding (generation) graph.

		Mirrors the training network setup, then unrolls the LSTM for
		`maxlen` steps, feeding the argmax word of each step back in as the
		next input.  Stores the per-step argmax tensors in
		self.generated_words.
		"""
		#same setup as `_create_network` function
		network_weights = self._initialize_weights(**self.network_architecture)
		# NOTE(review): both branches build the identical start token tensor,
		# so the form2 test is currently redundant here.
		if form2:
			start_token_tensor=tf.constant((np.zeros([self.batch_size])).astype(np.int32),dtype=tf.int32)
		else:
			start_token_tensor=tf.constant((np.zeros([self.batch_size])).astype(np.int32),dtype=tf.int32)
		self.network_weights=network_weights
		if not same_embedding:
			input_embedding,_=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['variational_encoding'])
			self.emb_out=input_embedding
		else:
			input_embedding,_=self._get_input_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'])
		# print input_embedding.shape
		# image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
		state = self.lstm.zero_state(self.batch_size,dtype=tf.float32)
		#declare list to hold the words of our generated captions
		all_words = []
		with tf.variable_scope("RNN"):
			# in the first iteration we have no previous word, so we directly pass in the image embedding
			# and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
			output, state = self.lstm(input_embedding, state)
			# print state,output.shape
			if form4:
				previous_word,_=input_embedding,None
			elif form2:
				previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor,logit=True)
			else:
				previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor)
			# print previous_word.shape
			# previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
			for i in range(self.network_architecture['maxlen']):
				tf.get_variable_scope().reuse_variables()
				# print i
				out, state = self.lstm(previous_word, state)
				# get a one-hot word encoding from the output of the LSTM
				logit=tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
				# NOTE(review): both branches take the same argmax; the form2
				# distinction is currently a no-op here.
				if not form2:
					best_word = tf.argmax(logit, 1)
				else:
					best_word = tf.argmax(logit, 1)
				# with tf.device("/cpu:0"):
				# 	# get the embedding of the best_word to use as input to the next iteration of our LSTM
				# 	previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
				# 	previous_word += self.embedding_bias
				# print logit.shape
				if form4:
					previous_word,_=input_embedding,None
				elif form2:
					previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word,logit=True)
				else:
					previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word)
				# print previous_word.shape
				all_words.append(best_word)
		self.generated_words=all_words
	def generate(self, _map, x):
		""" Generate data by sampling from latent space.
		If z_mu is not None, data for this point in latent space is
		generated. Otherwise, z_mu is drawn from prior in latent
		space.
		# """
		# if z_mu is None:
		# 	z_mu = np.random.normal(size=self.network_architecture["n_z"])
		# # Note: This maps to mean of distribution, we could alternatively
		# # sample from Gaussian distribution
		# return self.sess.run(self.x_reconstr_mean,
		# 					 feed_dict={self.z: z_mu})
		# saver = tf.train.Saver()
		# saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
		# Run greedy decoding for inputs x; also fetch the input embeddings.
		generated_word_index,f_it= self.sess.run([self.generated_words,self.emb_out], feed_dict={self.x:x})
		print f_it
		# NOTE(review): this drops into an interactive IPython shell and
		# blocks the process -- debugging aid, remove for batch runs.
		IPython.embed()
		# print generated_word_index
		# if form2:
		# 	generated_word_index=np.array(bin_to_int(generated_word_index))
		# 	generated_word_index=np.rollaxis(generated_word_index,1)
		# else:
		generated_word_index=np.array(generated_word_index)
		return generated_word_index
		# generated_sentence = ixtoword(_map,generated_word_index)
		# return generated_sentence
def ixtoword(_map, ixs):
	"""Translate nested index sequences into tokens via the `_map` lookup."""
	sentences = []
	for seq in ixs:
		sentences.append([_map[ix] for ix in seq])
	return sentences
def bin_to_int(a):
	"""Interpret each array in `a` as binary digits (LSB first along the last
	axis) and return the corresponding uint32 integers, one result per array."""
	converted = []
	for bits in a:
		place_values = 2 ** np.arange(bits.shape[-1])
		converted.append((bits * place_values).sum(axis=-1).astype(np.uint32))
	return converted
def train(network_architecture, learning_rate=0.001,
		batch_size=100, training_epochs=10, display_step=2,gen=False,ctrain=False,test=False):
	"""Construct a VariationalAutoencoder and run the training loop.

	gen=True returns the constructed model without training (generation mode).
	ctrain/test are forwarded to the model constructor.  Relies on the
	module-level globals n_samples, should_decay, all_samps, X, y, mask,
	should_save, model_path and loss_output_path.
	Returns the trained (or freshly constructed) model.
	"""
	global_step=tf.Variable(0,trainable=False)
	total_batch = int(n_samples / batch_size)
	# Optional staircase LR decay, one decay step per epoch's worth of batches.
	if should_decay and not gen:
		learning_rate = tf.train.exponential_decay(learning_rate, global_step,
			total_batch, 0.95, staircase=True)
	vae = VariationalAutoencoder(network_architecture,
		learning_rate=learning_rate,
		batch_size=batch_size,generative=gen,ctrain=ctrain,test=test,global_step=global_step)
	# Training cycle
	# if test:
	# 	maxlen=network_architecture['maxlen']
	# 	return tf.test.compute_gradient_error([vae.x,vae.caption_placeholder,vae.mask],[np.array([batch_size,n_input]),np.array([batch_size,maxlen,n_input]),np.array([batch_size,maxlen])],vae.loss,[])
	if gen:
		return vae
	costs=[]
	indlist=np.arange(all_samps).astype(int)
	# indlist=np.arange(10*batch_size).astype(int)
	for epoch in range(training_epochs):
		avg_cost = 0.
		# Loop over all batches
		np.random.shuffle(indlist)
		testify=False
		avg_loss=0
		# for i in range(1):
		for i in range(total_batch):
			# break
			ts=i
			# i=0
			# Batches are sampled with replacement rather than partitioned.
			inds=np.random.choice(indlist,batch_size)
			# print indlist[i*batch_size:(i+1)*batch_size]
			# batch_xs = X[indlist[i*batch_size:(i+1)*batch_size]]
			batch_xs = X[inds]
			# Fit training using batch data
			# if epoch==2 and i ==0:
			# 	testify=True
			# cost,loss = vae.partial_fit(batch_xs,y[indlist[i*batch_size:(i+1)*batch_size]].astype(np.uint32),mask[indlist[i*batch_size:(i+1)*batch_size]],timestep=epoch*total_batch+ts,testify=testify)
			cost,loss = vae.partial_fit(batch_xs,y[inds].astype(np.uint32),mask[inds],timestep=(epoch)+1,testify=testify)
			# Compute average loss (running mean over batches seen so far).
			avg_cost = avg_cost * i /(i+1) +cost/(i+1)
			# avg_loss=avg_loss*i/(i+1)+loss/(i+1)
			if i% display_step==0:
				print avg_cost,loss,cost
			if epoch == 0 and ts==0:
				costs.append(avg_cost)
		costs.append(avg_cost)
		# Display logs per epoch step
		if epoch % (display_step) == 0 or epoch==1:
			if should_save:
				print 'saving'
				vae.saver.save(vae.sess, os.path.join(model_path,'model'))
			pkl.dump(costs,open(loss_output_path,'wb'))
			print("Epoch:", '%04d' % (epoch+1),
				"cost=", avg_cost)
	return vae
if __name__ == "__main__":
	import sys
	# Experiment configuration, driven by positional CLI arguments:
	# argv[1]=vanilla|var, argv[2]=mid_vae, argv[3]=clip, argv[4]=batch_size,
	# argv[5]=transfer, argv[6]=binary_dim, argv[7]=lstm_dim, argv[8]=model_path,
	# argv[9]=maxlen, argv[10]=n_z, argv[11]=n_z_m, argv[12]=n_z_m_2,
	# argv[13]=epochs, argv[14]=ctc_loss, argv[15]=lstm_stack, argv[16]=forward|bd
	form2=True
	vanilla=True
	if sys.argv[1]!='vanilla':
		vanilla=False
	mid_vae=False
	form3= True
	form4=False
	# NOTE(review): this unconditionally re-enables vanilla mode, overriding
	# the sys.argv[1] check just above -- confirm whether that is intended.
	vanilla=True
	if sys.argv[2]=='mid_vae':
		mid_vae=True
		print 'mid_vae'
	same_embedding=False
	clip_grad=True
	if sys.argv[3]!='clip':
		clip_grad=False
	should_save=True
	should_train=True
	# NOTE(review): toggled off immediately -- training is disabled here and
	# the script runs in generation mode below.
	should_train=not should_train
	should_continue=False
	should_continue=True
	should_decay=True
	zero_end_tok=True
	training_epochs=int(sys.argv[13])
	batch_size=int(sys.argv[4])
	onehot=False
	embeddings_trainable=False
	if sys.argv[5]!='transfer':
		print 'true embs'
		embeddings_trainable=True
	transfertype2=True
	binary_dim=int(sys.argv[6])
	all_the_f_one_h=[]
	# Load the corpus, reserving 2 or 3 vocabulary ids for special tokens.
	if not zero_end_tok:
		X, y, mask, _map = load_text(50000-3)
	else:
		X, y, mask, _map = load_text(50000-2)
	n_input =50000
	n_samples = 30000
	lstm_dim=int(sys.argv[7])
	model_path = sys.argv[8]
	vartype=''
	transfertype=''
	maxlen=int(sys.argv[9])+2
	n_z=int(sys.argv[10])
	n_z_m=int(sys.argv[11])
	n_z_m_2=int(sys.argv[12])
	if not vanilla:
		vartype='var'
	if not embeddings_trainable:
		transfertype='transfer'
	cliptype=''
	if clip_grad:
		cliptype='clip'
	use_ctc=False
	losstype=''
	if sys.argv[14]=='ctc_loss':
		use_ctc=True
		losstype='ctc'
	lstm_stack=int(sys.argv[15])
	use_bdlstm=False
	bdlstmtype=''
	if sys.argv[16]!='forward':
		use_bdlstm=True
		bdlstmtype='bdlstm'
	# Output path encodes the full hyper-parameter configuration.
	loss_output_path= 'losses/%s%ss_%sb_%sl_%sh_%sd_%sz_%szm_%s%s%svaedef%soh.pkl'%(bdlstmtype,str(lstm_stack),str(batch_size),str(maxlen-2),str(lstm_dim),str(n_input),str(n_z),str(n_z_m),str(losstype),str(cliptype),str(vartype),str(transfertype))
	all_samps=len(X)
	print 'samps', all_samps
	n_samples=all_samps
	# X, y = X[:n_samples, :], y[:n_samples, :]
	network_architecture = \
		dict(maxlen=maxlen, # 2nd layer decoder neurons
			 n_input=n_input, # One hot encoding input
			 n_lstm_input=lstm_dim, # LSTM cell size
			 n_z=n_z,  # dimensionality of latent space
			 n_z_m=n_z_m,
			 n_z_m_2=n_z_m_2
			 )
	# batch_size=1
	if should_train:
		# vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue)
		# print train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,test=True)
		vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,learning_rate=.005)
	else:
		# Generation-only mode: build the model and restore from checkpoint.
		vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=True,ctrain=True)
	# # vae_2d._build_gen()
	ind_list=np.arange(len(X)).astype(int)
	# np.random.shuffle(ind_list)
	# Probe words for a qualitative check of the learned embeddings.
	test_y=['king','queen','him','her','he','she','man','woman','male','female']
	f_map=pkl.load(open('mapaoh.pkl','rb'))
	test_words=[f_map[x] for x in test_y]
	x_sample=np.array(test_words,dtype=np.uint32)
	# x_sample = X[ind_list[:batch_size]]
	print x_sample
	y_sample = y[ind_list[:batch_size]]
	print y_sample
	y_hat = vae_2d.generate(_map,x_sample)
	y_hat=y_hat[:10]
	# print y_hat
	y_hat_words=ixtoword(_map,y_hat)
	print y_hat_words
	# y_words=ixtoword(_map,y_sample)
	print(y_hat)
	print(y_hat_words)
	print test_y
	# print(y_words)
	# print(ixtoword(_map,(np.expand_dims(x_sample[:10],axis=0))))
	# # plt.figure(figsize=(8, 6))
	# plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
	# plt.colorbar()
	# plt.grid()
	# plt.show()
|
dricciardelli/vae2vec
|
vae_def_oh.py
|
Python
|
mit
| 37,009
|
[
"Gaussian"
] |
5d46f9d699bb1958792ea1d08796c2c3adec0015af4ceaab9400c3bea868f94a
|
"""
Modules used in building workflows
"""
import logging
import re
from galaxy import eggs
eggs.require( "elementtree" )
from elementtree.ElementTree import Element
import galaxy.tools
from galaxy import exceptions
from galaxy import model
from galaxy.dataset_collections import matching
from galaxy.web.framework import formbuilder
from galaxy.jobs.actions.post import ActionBox
from galaxy.model import PostJobAction
from galaxy.tools.parameters import check_param, DataToolParameter, DummyDataset, RuntimeValue, visit_input_values
from galaxy.tools.parameters import DataCollectionToolParameter
from galaxy.tools.parameters.wrapped import make_dict_copy
from galaxy.tools.execute import execute
from galaxy.util.bunch import Bunch
from galaxy.util import odict
from galaxy.util.json import loads
from galaxy.util.json import dumps
log = logging.getLogger( __name__ )
class WorkflowModule( object ):
    """Abstract base class for the units ("steps") that make up a Galaxy
    workflow.  Subclasses implement creation from various representations,
    (de)serialization of configuration/runtime state, and execution.
    """
    def __init__( self, trans ):
        # trans: the current Galaxy web transaction, kept for app access.
        self.trans = trans
    ## ---- Creating modules from various representations ---------------------
    @classmethod
    def new( Class, trans, tool_id=None ):
        """
        Create a new instance of the module with default state
        """
        return Class( trans )
    @classmethod
    def from_dict( Class, trans, d ):
        """
        Create a new instance of the module initialized from values in the
        dictionary `d`.
        """
        return Class( trans )
    @classmethod
    def from_workflow_step( Class, trans, step ):
        # Recreate the module from a persisted WorkflowStep model object.
        return Class( trans )
    ## ---- Saving in various forms ------------------------------------------
    def save_to_step( self, step ):
        # Persist this module's type onto the given step model object.
        step.type = self.type
    ## ---- General attributes -----------------------------------------------
    def get_type( self ):
        return self.type
    def get_name( self ):
        return self.name
    def get_tool_id( self ):
        # Only tool-backed modules have a tool id; the base has none.
        return None
    def get_tooltip( self, static_path='' ):
        return None
    ## ---- Configuration time -----------------------------------------------
    def get_state( self ):
        """ Return a serializable representation of the persistable state of
        the step - for tools it DefaultToolState.encode returns a string and
        for simpler module types a json description is dumped out.
        """
        return None
    def update_state( self, incoming ):
        """ Update the current state of the module against the user supplied
        parameters in the dict-like object `incoming`.
        """
        pass
    def get_errors( self ):
        """ It seems like this is effectively just used as boolean - some places
        in the tool shed self.errors is set to boolean, other places 'unavailable',
        likewise in Galaxy it stores a list containing a string with an unrecognized
        tool id error message.
        """
        return None
    def get_data_inputs( self ):
        """ Get configure time data input descriptions. """
        return []
    def get_data_outputs( self ):
        return []
    def get_runtime_input_dicts( self, step_annotation ):
        """ Get runtime inputs (inputs and parameters) as simple dictionary. """
        return []
    def get_config_form( self ):
        """ Render form that is embedded in workflow editor for modifying the
        step state of a node.
        """
        raise TypeError( "Abstract method" )
    def check_and_update_state( self ):
        """
        If the state is not in sync with the current implementation of the
        module, try to update. Returns a list of messages to be displayed
        """
        pass
    def add_dummy_datasets( self, connections=None):
        # Replaced connected inputs with DummyDataset values.
        pass
    ## ---- Run time ---------------------------------------------------------
    def get_runtime_inputs( self ):
        """ Used internally by modules and when displaying inputs in workflow
        editor and run workflow templates.
        Note: The ToolModule doesn't implement this and these templates contain
        specialized logic for dealing with the tool and state directly in the
        case of ToolModules.
        """
        raise TypeError( "Abstract method" )
    def encode_runtime_state( self, trans, state ):
        """ Encode the default runtime state at return as a simple `str` for
        use in a hidden parameter on the workflow run submission form.
        This default runtime state will be combined with user supplied
        parameters in `compute_runtime_state` below at workflow invocation time to
        actually describe how each step will be executed.
        """
        raise TypeError( "Abstract method" )
    def compute_runtime_state( self, trans, step_updates=None ):
        """ Determine the runtime state (potentially different from self.state
        which describes configuration state). This (again unlike self.state) is
        currently always a `DefaultToolState` object.
        If `step_updates` is `None`, this is likely for rendering the run form
        for instance and no runtime properties are available and state must be
        solely determined by the default runtime state described by the step.
        If `step_updates` are available they describe the runtime properties
        supplied by the workflow runner (potentially including a `tool_state`
        parameter which is the serialized default encoding state created with
        encode_runtime_state above).
        """
        raise TypeError( "Abstract method" )
    def execute( self, trans, progress, invocation, step ):
        """ Execute the given workflow step in the given workflow invocation.
        Use the supplied workflow progress object to track outputs, find
        inputs, etc...
        """
        raise TypeError( "Abstract method" )
class InputModule( WorkflowModule ):
    """Shared behavior for workflow input steps (datasets and dataset
    collections): simple JSON-dict state, a generated config form, and an
    execute() that exposes the supplied history content as the step output.
    """
    @classmethod
    def new( Class, trans, tool_id=None ):
        module = Class( trans )
        module.state = Class.default_state()
        return module
    @classmethod
    def from_dict( Class, trans, d, secure=True ):
        module = Class( trans )
        state = loads( d["tool_state"] )
        module.recover_state( state )
        return module
    @classmethod
    def from_workflow_step( Class, trans, step ):
        module = Class( trans )
        module.recover_state( step.tool_inputs )
        return module
    @classmethod
    def default_state( Class ):
        """ This method should return a dictionary describing each
        configuration property and its default value.
        """
        raise TypeError( "Abstract method" )
    def get_runtime_input_dicts( self, step_annotation ):
        name = self.state.get( "name", self.default_name )
        return [ dict( name=name, description=step_annotation ) ]
    def recover_state( self, state, **kwds ):
        """ Recover state `dict` from simple dictionary describing configuration
        state (potentially from persisted step state).
        Sub-classes should supply `default_state` method and `state_fields`
        attribute which are used to build up the state `dict`.
        """
        self.state = self.default_state()
        for key in self.state_fields:
            if state and key in state:
                self.state[ key ] = state[ key ]
    def save_to_step( self, step ):
        step.type = self.type
        step.tool_id = None
        step.tool_inputs = self.state
    def get_data_inputs( self ):
        # Input steps consume nothing from upstream steps.
        return []
    def get_config_form( self ):
        form = self._abstract_config_form( )
        return self.trans.fill_template( "workflow/editor_generic_form.mako",
                                         module=self, form=form )
    def get_state( self, secure=True ):
        # State is a plain dict; serialize directly as JSON.
        return dumps( self.state )
    def update_state( self, incoming ):
        self.recover_state( incoming )
    def get_runtime_state( self ):
        state = galaxy.tools.DefaultToolState()
        state.inputs = dict( input=None )
        return state
    def encode_runtime_state( self, trans, state ):
        # Wrap runtime inputs in a stand-in "tool" so DefaultToolState can encode.
        fake_tool = Bunch( inputs=self.get_runtime_inputs() )
        return state.encode( fake_tool, trans.app )
    def decode_runtime_state( self, trans, string ):
        fake_tool = Bunch( inputs=self.get_runtime_inputs() )
        state = galaxy.tools.DefaultToolState()
        state.decode( string, fake_tool, trans.app )
        return state
    def update_runtime_state( self, trans, state, values ):
        errors = {}
        for name, param in self.get_runtime_inputs().iteritems():
            value, error = check_param( trans, param, values.get( name, None ), values )
            state.inputs[ name ] = value
            if error:
                errors[ name ] = error
        return errors
    def compute_runtime_state( self, trans, step_updates=None ):
        if step_updates:
            # Fix this for multiple inputs
            state = self.decode_runtime_state( trans, step_updates.pop( "tool_state" ) )
            step_errors = self.update_runtime_state( trans, state, step_updates )
        else:
            state = self.get_runtime_state()
            step_errors = {}
        return state, step_errors
    def execute( self, trans, progress, invocation, step ):
        # No job is created for input steps; the supplied content is the output.
        job, step_outputs = None, dict( output=step.state.inputs['input'])
        # Web controller may set copy_inputs_to_history, API controller always sets
        # inputs.
        if invocation.copy_inputs_to_history:
            for input_dataset_hda in step_outputs.values():
                content_type = input_dataset_hda.history_content_type
                if content_type == "dataset":
                    new_hda = input_dataset_hda.copy( copy_children=True )
                    invocation.history.add_dataset( new_hda )
                    step_outputs[ 'input_ds_copy' ] = new_hda
                elif content_type == "dataset_collection":
                    new_hdca = input_dataset_hda.copy()
                    invocation.history.add_dataset_collection( new_hdca )
                    step_outputs[ 'input_ds_copy' ] = new_hdca
                else:
                    raise Exception("Unknown history content encountered")
        progress.set_outputs_for_input( step, step_outputs )
        return job
class InputDataModule( InputModule ):
    """Workflow step representing a single input dataset."""
    type = "data_input"
    name = "Input dataset"
    default_name = "Input Dataset"
    state_fields = [ "name" ]
    @classmethod
    def default_state( Class ):
        # Fresh configuration state holds just the display name.
        return { "name": Class.default_name }
    def _abstract_config_form( self ):
        # Editor form with a single text field for renaming the input.
        builder = formbuilder.FormBuilder( title=self.name )
        builder = builder.add_text( "name", "Name", value=self.state['name'] )
        return builder
    def get_data_outputs( self ):
        output_description = dict( name='output', extensions=['input'] )
        return [ output_description ]
    def get_runtime_inputs( self, filter_set=['data'] ):
        label = self.state.get( "name", "Input Dataset" )
        param_element = Element( "param", name="input", label=label, multiple=True, type="data", format=', '.join(filter_set) )
        return dict( input=DataToolParameter( None, param_element, self.trans ) )
class InputDataCollectionModule( InputModule ):
    """Workflow step representing an input dataset collection."""
    default_name = "Input Dataset Collection"
    default_collection_type = "list"
    type = "data_collection_input"
    name = "Input dataset collection"
    collection_type = default_collection_type
    state_fields = [ "name", "collection_type" ]
    @classmethod
    def default_state( Class ):
        return { "name": Class.default_name, "collection_type": Class.default_collection_type }
    def get_runtime_inputs( self, filter_set=['data'] ):
        display_label = self.state.get( "name", self.default_name )
        chosen_type = self.state.get( "collection_type", self.default_collection_type )
        element = Element( "param", name="input", label=display_label, type="data_collection", collection_type=chosen_type )
        return dict( input=DataCollectionToolParameter( None, element, self.trans ) )
    def _abstract_config_form( self ):
        # Offer the common collection types as datalist hints (insertion order matters).
        type_hints = odict.odict()
        for hint_key, hint_text in [ ( "list", "List of Datasets" ),
                                     ( "paired", "Dataset Pair" ),
                                     ( "list:paired", "List of Dataset Pairs" ) ]:
            type_hints[ hint_key ] = hint_text
        type_input = formbuilder.DatalistInput(
            name="collection_type",
            label="Collection Type",
            value=self.state[ "collection_type" ],
            extra_attributes=dict(refresh_on_change='true'),
            options=type_hints
        )
        form = formbuilder.FormBuilder( title=self.name )
        form = form.add_text( "name", "Name", value=self.state['name'] )
        form.inputs.append( type_input )
        return form
    def get_data_outputs( self ):
        return [ dict( name='output', extensions=['input_collection'], collection_type=self.state[ 'collection_type' ] ) ]
class ToolModule( WorkflowModule ):
type = "tool"
    def __init__( self, trans, tool_id ):
        """Bind this module to `tool_id`, resolving the tool via the toolbox.

        When the tool cannot be resolved, self.errors maps the tool id to a
        'Tool unavailable' message instead of raising.
        """
        self.trans = trans
        self.tool_id = tool_id
        self.tool = trans.app.toolbox.get_tool( tool_id )
        self.post_job_actions = {}
        self.workflow_outputs = []
        self.state = None
        # Messages recorded when a different tool version is substituted.
        self.version_changes = []
        if self.tool:
            self.errors = None
        else:
            self.errors = {}
            self.errors[ tool_id ] = 'Tool unavailable'
@classmethod
def new( Class, trans, tool_id=None ):
module = Class( trans, tool_id )
if module.tool is None:
error_message = "Attempted to create new workflow module for invalid tool_id, no tool with id - %s." % tool_id
raise Exception( error_message )
module.state = module.tool.new_state( trans, all_pages=True )
return module
    @classmethod
    def from_dict( Class, trans, d, secure=True ):
        """Restore a tool module from its dictionary representation `d`.

        Records a version-change message when the stored 'tool_version'
        differs from the installed tool's version, then decodes the stored
        tool state.  Errors/post-job-actions/outputs default to empty.
        """
        tool_id = d[ 'tool_id' ]
        module = Class( trans, tool_id )
        module.state = galaxy.tools.DefaultToolState()
        if module.tool is not None:
            if d.get('tool_version', 'Unspecified') != module.get_tool_version():
                module.version_changes.append( "%s: using version '%s' instead of version '%s' indicated in this workflow." % ( tool_id, d.get( 'tool_version', 'Unspecified' ), module.get_tool_version() ) )
            module.state.decode( d[ "tool_state" ], module.tool, module.trans.app, secure=secure )
        module.errors = d.get( "tool_errors", None )
        module.post_job_actions = d.get( "post_job_actions", {} )
        module.workflow_outputs = d.get( "workflow_outputs", [] )
        return module
    @classmethod
    def from_workflow_step( Class, trans, step ):
        """Recreate a tool module from a persisted WorkflowStep.

        Falls back to `from_dict` on step.config when the tool was previously
        unavailable; returns None when the tool cannot be found at all.
        """
        tool_id = step.tool_id
        if trans.app.toolbox and tool_id not in trans.app.toolbox.tools_by_id:
            # See if we have access to a different version of the tool.
            # TODO: If workflows are ever enhanced to use tool version
            # in addition to tool id, enhance the selection process here
            # to retrieve the correct version of the tool.
            tool = trans.app.toolbox.get_tool( tool_id )
            if tool:
                tool_id = tool.id
        if ( trans.app.toolbox and tool_id in trans.app.toolbox.tools_by_id ):
            if step.config:
                # This step has its state saved in the config field due to the
                # tool being previously unavailable.
                return module_factory.from_dict(trans, loads(step.config), secure=False)
            module = Class( trans, tool_id )
            if step.tool_version and (step.tool_version != module.tool.version):
                module.version_changes.append("%s: using version '%s' instead of version '%s' indicated in this workflow." % (tool_id, module.tool.version, step.tool_version))
            module.recover_state( step.tool_inputs )
            module.errors = step.tool_errors
            module.workflow_outputs = step.workflow_outputs
            pjadict = {}
            for pja in step.post_job_actions:
                pjadict[pja.action_type] = pja
            module.post_job_actions = pjadict
            return module
        return None
def recover_state( self, state, **kwds ):
""" Recover module configuration state property (a `DefaultToolState`
object) using the tool's `params_from_strings` method.
"""
app = self.trans.app
self.state = galaxy.tools.DefaultToolState()
params_from_kwds = dict(
ignore_errors=kwds.get( "ignore_errors", True )
)
self.state.inputs = self.tool.params_from_strings( state, app, **params_from_kwds )
    @classmethod
    def __get_tool_version( cls, trans, tool_id ):
        # Return a ToolVersion if one exists for tool_id.
        # Queries the tool-shed install model; yields None when the tool was
        # not installed through the tool shed.
        return trans.install_model.context.query( trans.install_model.ToolVersion ) \
                                          .filter( trans.install_model.ToolVersion.table.c.tool_id == tool_id ) \
                                          .first()
    def save_to_step( self, step ):
        """Persist this module's configuration onto a WorkflowStep ORM object.

        Serializes tool state when the tool is available, and re-creates
        PostJobAction rows from the in-memory `post_job_actions` mapping.
        """
        step.type = self.type
        step.tool_id = self.tool_id
        if self.tool:
            step.tool_version = self.get_tool_version()
            step.tool_inputs = self.tool.params_to_strings( self.state.inputs, self.trans.app )
        else:
            # Tool unavailable: nothing to serialize.
            step.tool_version = None
            step.tool_inputs = None
        step.tool_errors = self.errors
        for k, v in self.post_job_actions.iteritems():
            # Must have action_type, step. output and a_args are optional.
            if 'output_name' in v:
                output_name = v['output_name']
            else:
                output_name = None
            if 'action_arguments' in v:
                action_arguments = v['action_arguments']
            else:
                action_arguments = None
            self.trans.sa_session.add(PostJobAction(v['action_type'], step, output_name, action_arguments))
def get_name( self ):
if self.tool:
return self.tool.name
return 'unavailable'
def get_tool_id( self ):
return self.tool_id
def get_tool_version( self ):
return self.tool.version
def get_state( self, secure=True ):
return self.state.encode( self.tool, self.trans.app, secure=secure )
def get_errors( self ):
return self.errors
def get_tooltip( self, static_path='' ):
if self.tool.help:
return self.tool.help.render( static_path=static_path )
else:
return None
def get_data_inputs( self ):
data_inputs = []
def callback( input, value, prefixed_name, prefixed_label ):
if isinstance( input, DataToolParameter ):
data_inputs.append( dict(
name=prefixed_name,
label=prefixed_label,
multiple=input.multiple,
extensions=input.extensions,
input_type="dataset", ) )
if isinstance( input, DataCollectionToolParameter ):
data_inputs.append( dict(
name=prefixed_name,
label=prefixed_label,
multiple=input.multiple,
input_type="dataset_collection",
collection_type=input.collection_type,
extensions=input.extensions,
) )
visit_input_values( self.tool.inputs, self.state.inputs, callback )
return data_inputs
    def get_data_outputs( self ):
        """Describe each tool output as a {name, extensions} dict.

        Outputs whose format follows an input ("format_source") advertise the
        matching input's extensions (or the special 'input' wildcard);
        otherwise the declared format plus any <change_format> alternatives
        are listed.
        """
        data_outputs = []
        data_inputs = None
        for name, tool_output in self.tool.outputs.iteritems():
            if tool_output.format_source != None:
                formats = [ 'input' ] # default to special name "input" which remove restrictions on connections
                if data_inputs == None:
                    # Computed lazily, only when some output uses format_source.
                    data_inputs = self.get_data_inputs()
                # find the input parameter referenced by format_source
                for di in data_inputs:
                    # input names come prefixed with conditional and repeat names separated by '|'
                    # remove prefixes when comparing with format_source
                    if di['name'] != None and di['name'].split('|')[-1] == tool_output.format_source:
                        formats = di['extensions']
            else:
                formats = [ tool_output.format ]
            for change_elem in tool_output.change_format:
                for when_elem in change_elem.findall( 'when' ):
                    format = when_elem.get( 'format', None )
                    if format and format not in formats:
                        formats.append( format )
            data_outputs.append( dict( name=name, extensions=formats ) )
        return data_outputs
def get_runtime_input_dicts( self, step_annotation ):
# Step is a tool and may have runtime inputs.
input_dicts = []
for name, val in self.state.inputs.items():
input_type = type( val )
if input_type == RuntimeValue:
input_dicts.append( { "name": name, "description": "runtime parameter for tool %s" % self.get_name() } )
elif input_type == dict:
# Input type is described by a dict, e.g. indexed parameters.
for partval in val.values():
if type( partval ) == RuntimeValue:
input_dicts.append( { "name": name, "description": "runtime parameter for tool %s" % self.get_name() } )
return input_dicts
def get_post_job_actions( self ):
return self.post_job_actions
    def get_config_form( self ):
        """Render the workflow-editor form for this tool step.

        Dummy dataset values are added first so data inputs validate during
        template rendering.
        """
        self.add_dummy_datasets()
        return self.trans.fill_template( "workflow/editor_tool_form.mako",
            tool=self.tool, values=self.state.inputs, errors=( self.errors or {} ) )
    def encode_runtime_state( self, trans, state ):
        # Encode the supplied runtime state for persistence.
        # NOTE(review): the `trans` parameter is ignored — self.trans.app is
        # used instead; confirm callers expect this.
        return state.encode( self.tool, self.trans.app )
    def update_state( self, incoming ):
        """Update module state from `incoming` web-form values.

        Handles toggling individual parameters between build-time and
        runtime, substitutes DummyDataset placeholders for data parameters,
        and suppresses errors for `${...}` workflow-parameter templates.
        Resulting errors (if any) are stored on `self.errors`.
        """
        # Build a callback that handles setting an input to be required at
        # runtime. We still process all other parameters the user might have
        # set. We also need to make sure all datasets have a dummy value
        # for dependencies to see
        self.post_job_actions = ActionBox.handle_incoming(incoming)
        make_runtime_key = incoming.get( 'make_runtime', None )
        make_buildtime_key = incoming.get( 'make_buildtime', None )
        def item_callback( trans, key, input, value, error, old_value, context ):
            # Dummy value for Data parameters
            if isinstance( input, DataToolParameter ) or isinstance( input, DataCollectionToolParameter ):
                return DummyDataset(), None
            # Deal with build/runtime (does not apply to Data parameters)
            if key == make_buildtime_key:
                return input.get_initial_value( trans, context ), None
            elif isinstance( old_value, RuntimeValue ):
                # Once runtime, stays runtime unless explicitly toggled back.
                return old_value, None
            elif key == make_runtime_key:
                return RuntimeValue(), None
            elif isinstance(value, basestring) and re.search("\$\{.+?\}", str(value)):
                # Workflow Parameter Replacement, so suppress error from going to the workflow level.
                return value, None
            else:
                return value, error
        # Update state using incoming values
        errors = self.tool.update_state( self.trans, self.tool.inputs, self.state.inputs, incoming, item_callback=item_callback )
        self.errors = errors or None
    def check_and_update_state( self ):
        """Validate stored parameter values against the tool (allowing
        workflow parameters); returns any upgrade messages produced."""
        return self.tool.check_and_update_param_values( self.state.inputs, self.trans, allow_workflow_parameters=True )
    def compute_runtime_state( self, trans, step_updates=None ):
        """Apply `step_updates` (runtime form values) to the stored state.

        Warning: This method destructively modifies existing step state.
        Returns (state, step_errors); step_errors is None when no updates
        were supplied.
        """
        step_errors = None
        state = self.state
        if step_updates:
            # Get the tool
            tool = self.tool
            # Get old errors
            old_errors = state.inputs.pop( "__errors__", {} )
            # Update the state
            step_errors = tool.update_state( trans, tool.inputs, state.inputs, step_updates,
                                             update_only=True, old_errors=old_errors )
        return state, step_errors
def execute( self, trans, progress, invocation, step ):
tool = trans.app.toolbox.get_tool( step.tool_id )
tool_state = step.state
collections_to_match = self._find_collections_to_match( tool, progress, step )
# Have implicit collections...
if collections_to_match.has_collections():
collection_info = self.trans.app.dataset_collections_service.match_collections( collections_to_match )
else:
collection_info = None
param_combinations = []
if collection_info:
iteration_elements_iter = collection_info.slice_collections()
else:
iteration_elements_iter = [ None ]
for iteration_elements in iteration_elements_iter:
execution_state = tool_state.copy()
# TODO: Move next step into copy()
execution_state.inputs = make_dict_copy( execution_state.inputs )
# Connect up
def callback( input, value, prefixed_name, prefixed_label ):
replacement = None
if isinstance( input, DataToolParameter ) or isinstance( input, DataCollectionToolParameter ):
if iteration_elements and prefixed_name in iteration_elements:
if isinstance( input, DataToolParameter ):
# Pull out dataset instance from element.
replacement = iteration_elements[ prefixed_name ].dataset_instance
else:
# If collection - just use element model object.
replacement = iteration_elements[ prefixed_name ]
else:
replacement = progress.replacement_for_tool_input( step, input, prefixed_name )
return replacement
try:
# Replace DummyDatasets with historydatasetassociations
visit_input_values( tool.inputs, execution_state.inputs, callback )
except KeyError, k:
message_template = "Error due to input mapping of '%s' in '%s'. A common cause of this is conditional outputs that cannot be determined until runtime, please review your workflow."
message = message_template % (tool.name, k.message)
raise exceptions.MessageException( message )
param_combinations.append( execution_state.inputs )
execution_tracker = execute(
trans=self.trans,
tool=tool,
param_combinations=param_combinations,
history=invocation.history,
collection_info=collection_info,
workflow_invocation_uuid=invocation.uuid
)
if collection_info:
step_outputs = dict( execution_tracker.created_collections )
else:
step_outputs = dict( execution_tracker.output_datasets )
progress.set_step_outputs( step, step_outputs )
jobs = execution_tracker.successful_jobs
for job in jobs:
self._handle_post_job_actions( step, job, invocation.replacement_dict )
return jobs
    def _find_collections_to_match( self, tool, progress, step ):
        """Identify inputs whose incoming value is a dataset collection that
        must be mapped over (implicit collection execution).

        Single-valued data inputs receiving an HDCA, and collection inputs
        receiving a collection they can map over, are registered in the
        returned `CollectionsToMatch`.
        """
        collections_to_match = matching.CollectionsToMatch()
        def callback( input, value, prefixed_name, prefixed_label ):
            is_data_param = isinstance( input, DataToolParameter )
            if is_data_param and not input.multiple:
                data = progress.replacement_for_tool_input( step, input, prefixed_name )
                if isinstance( data, model.HistoryDatasetCollectionAssociation ):
                    collections_to_match.add( prefixed_name, data )
            is_data_collection_param = isinstance( input, DataCollectionToolParameter )
            if is_data_collection_param and not input.multiple:
                data = progress.replacement_for_tool_input( step, input, prefixed_name )
                history_query = input._history_query( self.trans )
                if history_query.can_map_over( data ):
                    # Map over sub-collections of the input's collection type.
                    collections_to_match.add( prefixed_name, data, subcollection_type=input.collection_type )
        visit_input_values( tool.inputs, step.state.inputs, callback )
        return collections_to_match
    def _handle_post_job_actions( self, step, job, replacement_dict ):
        # Create new PJA associations with the created job, to be run on completion.
        # PJA Parameter Replacement (only applies to immediate actions-- rename specifically, for now)
        # Pass along replacement dict with the execution of the PJA so we don't have to modify the object.
        for pja in step.post_job_actions:
            if pja.action_type in ActionBox.immediate_actions:
                # Immediate actions run now, with replacement variables applied.
                ActionBox.execute( self.trans.app, self.trans.sa_session, pja, job, replacement_dict )
            else:
                # Deferred actions are attached to the job for later execution.
                job.add_post_job_action( pja )
    def add_dummy_datasets( self, connections=None):
        """Fill connected (or, when `connections` is None, all) data inputs
        with DummyDataset placeholders.

        These placeholders are transient and must be re-added every time
        state is materialized.
        """
        if connections:
            # Store connections by input name
            input_connections_by_name = \
                dict( ( conn.input_name, conn ) for conn in connections )
        else:
            input_connections_by_name = {}
        # Any connected input needs to have value DummyDataset (these
        # are not persisted so we need to do it every time)
        def callback( input, value, prefixed_name, prefixed_label ):
            replacement = None
            if isinstance( input, DataToolParameter ):
                if connections is None or prefixed_name in input_connections_by_name:
                    if input.multiple:
                        # One placeholder per connection for multi-inputs.
                        replacement = [] if not connections else [DummyDataset() for conn in connections]
                    else:
                        replacement = DummyDataset()
            elif isinstance( input, DataCollectionToolParameter ):
                if connections is None or prefixed_name in input_connections_by_name:
                    replacement = DummyDataset()
            return replacement
        visit_input_values( self.tool.inputs, self.state.inputs, callback )
class WorkflowModuleFactory( object ):
    """Dispatches module construction to the registered per-type classes."""

    def __init__( self, module_types ):
        self.module_types = module_types

    def new( self, trans, type, tool_id=None ):
        """Return a module of `type` (with optional tool_id) initialized
        with new / default state."""
        assert type in self.module_types
        module_class = self.module_types[ type ]
        return module_class.new( trans, tool_id )

    def from_dict( self, trans, d, **kwargs ):
        """Return a module initialized from the data in dictionary `d`
        (dispatched on its 'type' entry)."""
        module_type = d['type']
        assert module_type in self.module_types
        return self.module_types[ module_type ].from_dict( trans, d, **kwargs )

    def from_workflow_step( self, trans, step ):
        """Return a module initialized from the WorkflowStep object `step`."""
        module_class = self.module_types[ step.type ]
        return module_class.from_workflow_step( trans, step )
def is_tool_module_type( module_type ):
    """True for the default/implicit module type: "tool" or any falsy value."""
    if not module_type:
        return True
    return module_type == "tool"
# Registry mapping workflow step types to their module classes;
# `module_factory` is the shared factory instance used throughout this module.
module_types = dict(
    data_input=InputDataModule,
    data_collection_input=InputDataCollectionModule,
    tool=ToolModule,
)
module_factory = WorkflowModuleFactory( module_types )
class MissingToolException( Exception ):
    """ Raised by WorkflowModuleInjector when the tool backing a workflow
    step's module cannot be found in the toolbox. """
class WorkflowModuleInjector(object):
    """ Injects workflow step objects from the ORM with appropriate module and
    module generated/influenced state. """
    def __init__( self, trans ):
        self.trans = trans
    def inject( self, step, step_args=None ):
        """ Pre-condition: `step` is an ORM object coming from the database, if
        supplied `step_args` is the representation of the inputs for that step
        supplied via web form.

        Post-condition: The supplied `step` has new non-persistent attributes
        useful during workflow invocation. These include 'upgrade_messages',
        'state', 'input_connections_by_name', and 'module'.

        If step_args is provided from a web form this is applied to generate
        'state' else it is just obtained from the database.

        Raises MissingToolException when no module can be built for the step.
        Returns step errors (or None).
        """
        trans = self.trans
        step_errors = None
        step.upgrade_messages = {}
        # Make connection information available on each step by input name.
        input_connections_by_name = {}
        for conn in step.input_connections:
            input_name = conn.input_name
            if not input_name in input_connections_by_name:
                input_connections_by_name[input_name] = []
            input_connections_by_name[input_name].append(conn)
        step.input_connections_by_name = input_connections_by_name
        # Populate module.
        module = step.module = module_factory.from_workflow_step( trans, step )
        # Calculating step errors and state depends on whether step is a tool step or not.
        if not module:
            step.module = None
            step.state = None
            raise MissingToolException()
        # Fix any missing parameters
        step.upgrade_messages = module.check_and_update_state()
        # Any connected input needs to have value DummyDataset (these
        # are not persisted so we need to do it every time)
        module.add_dummy_datasets( connections=step.input_connections )
        state, step_errors = module.compute_runtime_state( trans, step_args )
        step.state = state
        return step_errors
def populate_module_and_state( trans, workflow, param_map ):
    """ Used by API but not web controller, walks through a workflow's steps
    and populates transient module and state attributes on each.

    `param_map` maps step ids to their web-form input dicts.  Raises a
    MessageException when any tool step has validation errors or pending
    upgrade messages.
    """
    module_injector = WorkflowModuleInjector( trans )
    for step in workflow.steps:
        step_args = param_map.get( step.id, {} )
        step_errors = module_injector.inject( step, step_args=step_args )
        # Only tool steps (type 'tool' or unset) can block the run on errors.
        if step.type == 'tool' or step.type is None:
            if step_errors:
                message = "Workflow cannot be run because of validation errors in some steps: %s" % step_errors
                raise exceptions.MessageException( message )
            if step.upgrade_messages:
                message = "Workflow cannot be run because of step upgrade messages: %s" % step.upgrade_messages
                raise exceptions.MessageException( message )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/workflow/modules.py
|
Python
|
gpl-3.0
| 34,906
|
[
"Galaxy"
] |
371cb2cc6c04d92c46a6b6bae7d667bf622ba3eea0772141b3d0ceb9390ae3d8
|
# -*- coding: utf-8 -*-
from behave import given, then, when
import selenium.webdriver.support.ui as ui
@given("we have an existing user")
def given_existing_user(context):
context.test_user = {
'fullname': 'Alyssa P Hacker',
'email': 'alyssa@hacker.com',
'username': 'alyssa',
'password': 'alyssa',
'confirm_password': 'alyssa',
}
context.browser.visit('/register')
assert context.browser.find_element_by_name('csrf_token').is_enabled()
for k, v in context.test_user.items():
context.browser.find_element_by_name(k).send_keys(v)
register_form = context.browser.find_element_by_id('form-register')
register_form.submit()
@when("the user tries to log in")
def when_login_form_submit(context):
context.login_data = {
'username': context.test_user['username'],
'password': context.test_user['password'],
}
wait = ui.WebDriverWait(context.browser, 30)
context.browser.visit('/login')
assert context.browser.find_element_by_name('csrf_token').is_enabled()
context.browser.find_element_by_id('showmore').click()
for k, v in context.login_data.items():
context.browser.find_element_by_name(k).send_keys(v)
context.browser.find_element_by_name('username').submit()
context.user_button = wait.until(
lambda browser: browser.find_element_by_id('hg-user-btn')
)
@then("we log the user in")
def user_login(context):
assert context.user_button.is_enabled()
|
hasgeek/lastuser
|
features/steps/login.py
|
Python
|
bsd-2-clause
| 1,508
|
[
"VisIt"
] |
f2a672ac923452e40420c7aa627674cc592d91b001319adac16c47fbd06200c0
|
# GWR Bandwidth selection class
#Thinking about removing the search method and just having optimization begin in
#class __init__
#x_glob and offset parameters dont yet do anything; former is for semiparametric
#GWR and later is for offset variable for Poisson model
__author__ = "Taylor Oshan Tayoshan@gmail.com"
from kernels import *
from search import golden_section, equal_interval, flexible_bw
from gwr import GWR
from crankshaft.regression.glm.family import Gaussian, Poisson, Binomial
import pysal.spreg.user_output as USER
from diagnostics import get_AICc, get_AIC, get_BIC, get_CV
from scipy.spatial.distance import pdist, squareform
from pysal.common import KDTree
import numpy as np
# Kernel dispatch table: odd codes are fixed-bandwidth kernels, even codes
# are their adaptive (nearest-neighbour) variants.
kernels = {1: fix_gauss, 2: adapt_gauss, 3: fix_bisquare, 4:
        adapt_bisquare, 5: fix_exp, 6:adapt_exp}
# Dispatch from model-selection criterion name to its diagnostic function.
getDiag = {'AICc': get_AICc,'AIC':get_AIC, 'BIC': get_BIC, 'CV': get_CV}
class Sel_BW(object):
"""
Select bandwidth for kernel
Methods: p211 - p213, bandwidth selection
Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).
Geographically weighted regression: the analysis of spatially varying relationships.
Parameters
----------
y : array
n*1, dependent variable.
x_glob : array
n*k1, fixed independent variable.
x_loc : array
n*k2, local independent variable, including constant.
coords : list of tuples
(x,y) of points used in bandwidth selection
family : string
GWR model type: 'Gaussian', 'logistic, 'Poisson''
offset : array
n*1, offset variable for Poisson model
kernel : string
kernel function: 'gaussian', 'bisquare', 'exponetial'
fixed : boolean
True for fixed bandwidth and False for adaptive (NN)
fb : True for flexible (mutliple covaraite-specific) bandwidths
False for a traditional (same for all covariates)
bandwdith; defualt is False.
constant : boolean
True to include intercept (default) in model and False to exclude
intercept.
Attributes
----------
y : array
n*1, dependent variable.
x_glob : array
n*k1, fixed independent variable.
x_loc : array
n*k2, local independent variable, including constant.
coords : list of tuples
(x,y) of points used in bandwidth selection
family : string
GWR model type: 'Gaussian', 'logistic, 'Poisson''
kernel : string
type of kernel used and wether fixed or adaptive
criterion : string
bw selection criterion: 'AICc', 'AIC', 'BIC', 'CV'
search : string
bw search method: 'golden', 'interval'
bw_min : float
min value used in bandwidth search
bw_max : float
max value used in bandwidth search
interval : float
interval increment used in interval search
tol : float
tolerance used to determine convergence
max_iter : integer
max interations if no convergence to tol
fb : True for flexible (mutliple covaraite-specific) bandwidths
False for a traditional (same for all covariates)
bandwdith; defualt is False.
constant : boolean
True to include intercept (default) in model and False to exclude
intercept.
"""
def __init__(self, coords, y, x_loc, x_glob=None, family=Gaussian(),
offset=None, kernel='bisquare', fixed=False, fb=False, constant=True):
self.coords = coords
self.y = y
self.x_loc = x_loc
if x_glob is not None:
self.x_glob = x_glob
else:
self.x_glob = []
self.family=family
self.fixed = fixed
self.kernel = kernel
if offset is None:
self.offset = np.ones((len(y), 1))
else:
self.offset = offset * 1.0
self.fb = fb
self.constant = constant
def search(self, search='golden_section', criterion='AICc', bw_min=0.0,
bw_max=0.0, interval=0.0, tol=1.0e-6, max_iter=200, init_fb=True,
tol_fb=1.0e-5, rss_score=False, max_iter_fb=200):
"""
Parameters
----------
criterion : string
bw selection criterion: 'AICc', 'AIC', 'BIC', 'CV'
search : string
bw search method: 'golden', 'interval'
bw_min : float
min value used in bandwidth search
bw_max : float
max value used in bandwidth search
interval : float
interval increment used in interval search
tol : float
tolerance used to determine convergence
max_iter : integer
max iterations if no convergence to tol
init_fb : True to initialize flexible bandwidth search with
esitmates from a traditional GWR and False to
initialize flexible bandwidth search with global
regression estimates
tol_fb : convergence tolerence for the flexible bandwidth
backfitting algorithm; a larger tolerance may stop the
algorith faster though it may result in a less optimal
model
max_iter_fb : max iterations if no convergence to tol for flexible
bandwidth backfittign algorithm
rss_score : True to use the residual sum of sqaures to evaluate
each iteration of the flexible bandwidth backfitting
routine and False to use a smooth function; default is
False
Returns
-------
bw : scalar or array
optimal bandwidth value or values; returns scalar for
fb=False and array for fb=True; ordering of bandwidths
matches the ordering of the covariates (columns) of the
designs matrix, X
"""
self.search = search
self.criterion = criterion
self.bw_min = bw_min
self.bw_max = bw_max
self.interval = interval
self.tol = tol
self.max_iter = max_iter
self.init_fb = init_fb
self.tol_fb = tol_fb
self.rss_score = rss_score
self.max_iter_fb = max_iter_fb
if self.fixed:
if self.kernel == 'gaussian':
ktype = 1
elif self.kernel == 'bisquare':
ktype = 3
elif self.kernel == 'exponential':
ktype = 5
else:
raise TypeError('Unsupported kernel function ', self.kernel)
else:
if self.kernel == 'gaussian':
ktype = 2
elif self.kernel == 'bisquare':
ktype = 4
elif self.kernel == 'exponential':
ktype = 6
else:
raise TypeError('Unsupported kernel function ', self.kernel)
function = lambda bw: getDiag[criterion](
GWR(self.coords, self.y, self.x_loc, bw, family=self.family,
kernel=self.kernel, fixed=self.fixed, offset=self.offset).fit())
if ktype % 2 == 0:
int_score = True
else:
int_score = False
self.int_score = int_score
if self.fb:
self._fbw()
print self.bw[1]
self.XB = self.bw[4]
self.err = self.bw[5]
else:
self._bw()
return self.bw[0]
def _bw(self):
gwr_func = lambda bw: getDiag[self.criterion](
GWR(self.coords, self.y, self.x_loc, bw, family=self.family,
kernel=self.kernel, fixed=self.fixed, constant=self.constant).fit())
if self.search == 'golden_section':
a,c = self._init_section(self.x_glob, self.x_loc, self.coords,
self.constant)
delta = 0.38197 #1 - (np.sqrt(5.0)-1.0)/2.0
self.bw = golden_section(a, c, delta, gwr_func, self.tol,
self.max_iter, self.int_score)
elif self.search == 'interval':
self.bw = equal_interval(self.bw_min, self.bw_max, self.interval,
gwr_func, self.int_score)
else:
raise TypeError('Unsupported computational search method ', search)
def _fbw(self):
y = self.y
if self.constant:
X = USER.check_constant(self.x_loc)
else:
X = self.x_loc
n, k = X.shape
family = self.family
offset = self.offset
kernel = self.kernel
fixed = self.fixed
coords = self.coords
search = self.search
criterion = self.criterion
bw_min = self.bw_min
bw_max = self.bw_max
interval = self.interval
tol = self.tol
max_iter = self.max_iter
gwr_func = lambda y, X, bw: GWR(coords, y, X, bw, family=family,
kernel=kernel, fixed=fixed, offset=offset, constant=False).fit()
bw_func = lambda y, X: Sel_BW(coords, y, X, x_glob=[], family=family,
kernel=kernel, fixed=fixed, offset=offset, constant=False)
sel_func = lambda bw_func: bw_func.search(search=search,
criterion=criterion, bw_min=bw_min, bw_max=bw_max,
interval=interval, tol=tol, max_iter=max_iter)
self.bw = flexible_bw(self.init_fb, y, X, n, k, family, self.tol_fb,
self.max_iter_fb, self.rss_score, gwr_func, bw_func, sel_func)
def _init_section(self, x_glob, x_loc, coords, constant):
if len(x_glob) > 0:
n_glob = x_glob.shape[1]
else:
n_glob = 0
if len(x_loc) > 0:
n_loc = x_loc.shape[1]
else:
n_loc = 0
if constant:
n_vars = n_glob + n_loc + 1
else:
n_vars = n_glob + n_loc
n = np.array(coords).shape[0]
if self.int_score:
a = 40 + 2 * n_vars
c = n
else:
nn = 40 + 2 * n_vars
sq_dists = squareform(pdist(coords))
sort_dists = np.sort(sq_dists, axis=1)
min_dists = sort_dists[:,nn-1]
max_dists = sort_dists[:,-1]
a = np.min(min_dists)/2.0
c = np.max(max_dists)/2.0
if a < self.bw_min:
a = self.bw_min
if c > self.bw_max and self.bw_max > 0:
c = self.bw_max
return a, c
|
CartoDB/crankshaft
|
release/python/0.8.2/crankshaft/crankshaft/regression/gwr/base/sel_bw.py
|
Python
|
bsd-3-clause
| 11,262
|
[
"Gaussian"
] |
de777aecd7343677a4479d54a116bb6a1575ef6116fd374d7edf4d77f8ae9bf9
|
# Copyright (C) 2010 CAMd
# Please see the accompanying LICENSE file for further information.
"""This module provides all the classes and functions associated with the
evaluation of exact exchange with k-point sampling."""
from math import pi, sqrt
import sys
import numpy as np
from ase import Atoms
from ase.units import Ha
from time import ctime
from gpaw.xc import XC
from gpaw.xc.kernel import XCNull
from gpaw.xc.functional import XCFunctional
from gpaw.utilities import pack, unpack2, packed_index, devnull
from gpaw.lfc import LFC
from gpaw.wavefunctions.pw import PWDescriptor
from gpaw.kpt_descriptor import KPointDescriptor
from gpaw.mpi import world, rank
class KPoint:
    def __init__(self, kd, kpt=None):
        """Helper class for parallelizing over k-points.

        Placeholder for wave functions, occupation numbers,
        projections, and global k-point index."""
        self.kd = kd

        if kpt is not None:
            self.psit_nG = kpt.psit_nG
            # Occupations rescaled to per-band weights.
            self.f_n = kpt.f_n / kpt.weight / kd.nbzkpts * 2 / kd.nspins
            self.weight = 1. / kd.nbzkpts * 2 / kd.nspins
            self.eps_n = kpt.eps_n
            self.P_ani = kpt.P_ani
            self.k = kpt.k
            self.s = kpt.s

        # Pending non-blocking MPI requests (see start_sending/receiving).
        self.requests = []

    def next(self):
        """Create empty object.

        Data will be received from other processor."""
        kpt = KPoint(self.kd)

        # intialize array for receiving:
        kpt.psit_nG = np.empty_like(self.psit_nG)
        kpt.f_n = np.empty_like(self.f_n)

        # Total number of projector functions:
        I = sum([P_ni.shape[1] for P_ni in self.P_ani.values()])

        # One contiguous buffer for all projections; per-atom views into it.
        kpt.P_In = np.empty((I, len(kpt.f_n)), complex)

        kpt.P_ani = {}
        I1 = 0
        for a, P_ni in self.P_ani.items():
            I2 = I1 + P_ni.shape[1]
            kpt.P_ani[a] = kpt.P_In[I1:I2].T
            I1 = I2

        # Next IBZ k-point index (wraps around).
        kpt.k = (self.k + 1) % self.kd.nibzkpts
        kpt.s = self.s
        return kpt

    def start_sending(self, rank):
        # Concatenate per-atom projections into one buffer; tags 1-3 pair
        # these sends with the receives in start_receiving.
        P_In = np.concatenate([P_ni.T for P_ni in self.P_ani.values()])
        self.requests += [
            self.kd.comm.send(self.psit_nG, rank, block=False, tag=1),
            self.kd.comm.send(self.f_n, rank, block=False, tag=2),
            self.kd.comm.send(P_In, rank, block=False, tag=3)]

    def start_receiving(self, rank):
        # NOTE(review): assumes self.P_In exists, i.e. this object was
        # produced by next(); confirm callers respect that.
        self.requests += [
            self.kd.comm.receive(self.psit_nG, rank, block=False, tag=1),
            self.kd.comm.receive(self.f_n, rank, block=False, tag=2),
            self.kd.comm.receive(self.P_In, rank, block=False, tag=3)]

    def wait(self):
        # Block until all outstanding sends/receives complete.
        self.kd.comm.waitall(self.requests)
        self.requests = []
class HybridXC(XCFunctional):
orbital_dependent = True
    def __init__(self, name, hybrid=None, xc=None, finegrid=False,
                 alpha=None, skip_gamma=False, gygi=False, acdf=True,
                 qsym=True, txt=None, ecut=None):
        """Mix standard functionals with exact exchange.

        name: str
            Name of hybrid functional ('EXX', 'PBE0', 'B3LYP', or custom).
        hybrid: float
            Fraction of exact exchange.
        xc: str or XCFunctional object
            Standard DFT functional with scaled down exchange.
        finegrid: boolean
            Use fine grid for energy functional evaluations?
        txt: str or None
            Output file name; None writes to stdout on the master rank only.
        ecut: float or None
            Plane-wave cutoff; when None it is derived from the grid
            spacing in initialize().
        """
        if name == 'EXX':
            assert hybrid is None and xc is None
            hybrid = 1.0
            xc = XC(XCNull())
        elif name == 'PBE0':
            assert hybrid is None and xc is None
            hybrid = 0.25
            xc = XC('HYB_GGA_XC_PBEH')
        elif name == 'B3LYP':
            assert hybrid is None and xc is None
            hybrid = 0.2
            xc = XC('HYB_GGA_XC_B3LYP')

        if isinstance(xc, str):
            xc = XC(xc)

        self.hybrid = hybrid
        self.xc = xc
        self.type = xc.type
        self.alpha = alpha
        self.qsym = qsym
        self.skip_gamma = skip_gamma
        self.gygi = gygi
        self.acdf = acdf
        self.exx = None
        self.ecut = ecut

        if txt is None:
            if rank == 0:
                #self.txt = devnull
                self.txt = sys.stdout
            else:
                # Silence non-master ranks entirely.
                sys.stdout = devnull
                self.txt = devnull
        else:
            assert type(txt) is str
            from ase.parallel import paropen
            self.txt = paropen(txt, 'w')

        XCFunctional.__init__(self, name)
def get_setup_name(self):
return 'PBE'
def calculate_radial(self, rgd, n_sLg, Y_L, v_sg,
dndr_sLg=None, rnablaY_Lv=None,
tau_sg=None, dedtau_sg=None):
return self.xc.calculate_radial(rgd, n_sLg, Y_L, v_sg,
dndr_sLg, rnablaY_Lv)
def calculate_paw_correction(self, setup, D_sp, dEdD_sp=None,
addcoredensity=True, a=None):
return self.xc.calculate_paw_correction(setup, D_sp, dEdD_sp,
addcoredensity, a)
    def initialize(self, density, hamiltonian, wfs, occupations):
        """Set up grids, q-point sampling, plane-wave descriptors, and the
        Gamma-point divergence correction for the exact-exchange evaluation.
        """
        self.xc.initialize(density, hamiltonian, wfs, occupations)
        self.nspins = wfs.nspins
        self.setups = wfs.setups
        self.density = density
        self.kpt_u = wfs.kpt_u
        self.wfs = wfs

        self.gd = density.gd
        self.kd = wfs.kd
        self.bd = wfs.bd

        N_c = self.gd.N_c
        N = self.gd.N_c.prod()
        vol = self.gd.dv * N

        if self.alpha is None:
            # XXX ?
            self.alpha = 6 * vol**(2 / 3.0) / pi**2

        # Regularization constant for the q=0 Coulomb divergence;
        # corrected by the k-point sum below.
        self.gamma = (vol / (2 * pi)**2 * sqrt(pi / self.alpha) *
                      self.kd.nbzkpts)

        if self.ecut is None:
            # Default plane-wave cutoff from the grid spacing.
            self.ecut = 0.5 * pi**2 / (self.gd.h_cv**2).sum(1).max() * 0.9999

        assert self.kd.N_c is not None

        # Extended k-point grid covering all difference vectors k - k'.
        n = self.kd.N_c * 2 - 1
        bzk_kc = np.indices(n).transpose((1, 2, 3, 0))
        bzk_kc.shape = (-1, 3)
        bzk_kc -= self.kd.N_c - 1
        self.bzk_kc = bzk_kc.astype(float) / self.kd.N_c

        self.bzq_qc = self.kd.get_bz_q_points()
        if self.qsym:
            # Reduce q-points by symmetry and weight accordingly.
            op_scc = self.kd.symmetry.op_scc
            self.ibzq_qc = self.kd.get_ibz_q_points(self.bzq_qc,
                                                    op_scc)[0]
            self.q_weights = self.kd.q_weights * len(self.bzq_qc)
        else:
            self.ibzq_qc = self.bzq_qc
            self.q_weights = np.ones(len(self.bzq_qc))

        self.pwd = PWDescriptor(self.ecut, self.gd, complex)
        self.G2_qG = self.pwd.g2(self.bzk_kc)

        # Accumulate the divergence correction over k-points inside the
        # first BZ window.
        n = 0
        for k_c, Gpk2_G in zip(self.bzk_kc[:], self.G2_qG):
            if (k_c > -0.5).all() and (k_c <= 0.5).all(): #XXX???
                if k_c.any():
                    self.gamma -= np.dot(np.exp(-self.alpha * Gpk2_G),
                                         Gpk2_G**-1)
                else:
                    # Skip the singular G=0 term at the Gamma point.
                    self.gamma -= np.dot(np.exp(-self.alpha * Gpk2_G[1:]),
                                         Gpk2_G[1:]**-1)
                n += 1

        assert n == self.kd.N_c.prod()

        # Rebuild the descriptor on the (possibly reduced) q-point set.
        self.pwd = PWDescriptor(self.ecut, self.gd, complex)
        self.G2_qG = self.pwd.g2(self.ibzq_qc)

        self.ghat = LFC(self.gd,
                        [setup.ghat_l for setup in density.setups],
                        KPointDescriptor(self.bzq_qc), dtype=complex)

        #self.interpolator = density.interpolator
        self.print_initialization(hamiltonian.xc.name)
def set_positions(self, spos_ac):
self.ghat.set_positions(spos_ac)
self.spos_ac = spos_ac
def calculate(self, gd, n_sg, v_sg=None, e_g=None):
# Normal XC contribution:
exc = self.xc.calculate(gd, n_sg, v_sg, e_g)
# Add EXX contribution:
return exc + self.exx
    def calculate_exx(self):
        """Non-selfconsistent calculation."""
        # Loops over spins, BZ k-points and IBZ q-points, accumulating the
        # exact-exchange energy via apply(); adds the PAW correction and
        # prints a per-q breakdown (Python 2 print-to-file syntax).
        kd = self.kd
        K = len(kd.bzk_kc)
        W = world.size // self.nspins
        parallel = (W > 1)

        self.exx = 0.0
        self.exx_kq = np.zeros((K, len(self.ibzq_qc)), float)

        for s in range(self.nspins):
            ibz_kpts = [KPoint(kd, kpt)
                        for kpt in self.kpt_u if kpt.s == s]
            for ik, kpt in enumerate(kd.bzk_kc):
                print >> self.txt, 'K %s %s ...' % (ik, kpt)
                for iq, q in enumerate(self.ibzq_qc):
                    kpq = kd.find_k_plus_q(q, kpts_k=[ik])
                    self.apply(ibz_kpts[kd.bz2ibz_k[ik]],
                               ibz_kpts[kd.bz2ibz_k[kpq[0]]],
                               ik, kpq[0], iq)

        self.exx = world.sum(self.exx)
        self.exx += self.calculate_exx_paw_correction()

        exx_q = np.sum(self.exx_kq, 0)

        print >> self.txt
        print >> self.txt, \
            '------------------------------------------------------'
        print >> self.txt
        print >> self.txt, 'Contributions: q w E_q (eV)'
        for q in range(len(exx_q)):
            print >> self.txt, '[%1.3f %1.3f %1.3f] %1.3f %s' % \
                (self.ibzq_qc[q][0], self.ibzq_qc[q][1], self.ibzq_qc[q][2],
                 self.q_weights[q]/len(self.bzq_qc),
                 exx_q[q]/self.q_weights[q]*len(self.bzq_qc)*Ha)
        print >> self.txt, 'E_EXX = %s eV' % (self.exx*Ha)
        print >> self.txt
        print >> self.txt, 'Calculation completed at: ', ctime()
        print >> self.txt
        print >> self.txt, \
            '------------------------------------------------------'
        print >> self.txt
    def apply(self, kpt1, kpt2, ik1, ik2, iq):
        """Add the exchange energy from all band pairs of (kpt1, kpt2)
        at irreducible q-point *iq* to self.exx and self.exx_kq.

        kpt1, kpt2: KPoint wrappers for the IBZ k-points corresponding
            to BZ indices ik1 and ik2.
        iq: index into self.ibzq_qc.
        """
        k1_c = self.kd.bzk_kc[ik1]
        k2_c = self.kd.bzk_kc[ik2]
        q = self.ibzq_qc[iq]
        if self.qsym:
            # Locate the matching q-point in the full BZ q-point list.
            for i, q in enumerate(self.bzq_qc):
                if abs(q - self.ibzq_qc[iq]).max() < 1e-9:
                    bzq_index = i
                    break
        else:
            bzq_index = iq
        N_c = self.gd.N_c
        # Phase e^{-iq.r} on the real-space grid.
        eikr_R = np.exp(-2j * pi * np.dot(np.indices(N_c).T, q / N_c).T)
        Gamma = abs(q).max() < 1e-9
        if Gamma and self.skip_gamma:
            return
        Gpk2_G = self.G2_qG[iq]
        if Gamma:
            # Regularize the divergent G=0 term using the gamma parameter
            # computed in the constructor.
            Gpk2_G = Gpk2_G.copy()
            Gpk2_G[0] = 1.0 / self.gamma
        N = N_c.prod()
        vol = self.gd.dv * N
        nspins = self.nspins
        fcut = 1e-10  # skip band pairs with (nearly) zero occupation weight
        for n1, psit1_R in enumerate(kpt1.psit_nG):
            f1 = kpt1.f_n[n1]
            for n2, psit2_R in enumerate(kpt2.psit_nG):
                if self.acdf:
                    if self.gygi and Gamma:
                        f2 = (self.q_weights[iq] * kpt2.weight)
                    else:
                        # ACDF weight: counts only pairs with
                        # eps_n2 <= eps_n1 (factor 0, 1 or 2).
                        f2 = (self.q_weights[iq] * kpt2.weight
                              * (1 - np.sign(kpt2.eps_n[n2] - kpt1.eps_n[n1])))
                else:
                    f2 = kpt2.f_n[n2] * self.q_weights[iq]
                if abs(f1) < fcut or abs(f2) < fcut:
                    continue
                # Pair density (with PAW compensation charges added).
                nt_R = self.calculate_pair_density(n1, n2, kpt1, kpt2,
                                                   ik1, ik2, bzq_index)
                nt_G = self.pwd.fft(nt_R * eikr_R) / N
                vt_G = nt_G.copy()
                # Coulomb kernel 4*pi/|G+q|^2 (with prefactors folded in).
                vt_G *= -pi * vol / Gpk2_G
                e = np.vdot(nt_G, vt_G).real * nspins * self.hybrid
                self.exx += f1 * f2 * e
                self.exx_kq[ik1,iq] += f1*f2*e
    def calculate_pair_density(self, n1, n2, kpt1, kpt2, ik1, ik2, bzq_index):
        """Return the pair density of bands n1 and n2 on the grid, with
        PAW compensation charges added via self.ghat.

        The wave functions are symmetry-transformed from the IBZ to the
        BZ points ik1/ik2, and the projector coefficients are rotated
        and phase-corrected accordingly.
        """
        psit1_G = self.kd.transform_wave_function(kpt1.psit_nG[n1], ik1)
        psit2_G = self.kd.transform_wave_function(kpt2.psit_nG[n2], ik2)
        nt_G = psit1_G.conj() * psit2_G
        # Symmetry operation and time-reversal flag taking each IBZ
        # k-point to the requested BZ k-point.
        s1 = self.kd.sym_k[ik1]
        s2 = self.kd.sym_k[ik2]
        t1 = self.kd.time_reversal_k[ik1]
        t2 = self.kd.time_reversal_k[ik2]
        k1_c = self.kd.ibzk_kc[kpt1.k]
        k2_c = self.kd.ibzk_kc[kpt2.k]
        Q_aL = {}
        for a in kpt1.P_ani.keys():
            # Atom a is mapped to atoms b1/b2 by the symmetry operations.
            b1 = self.kd.symmetry.a_sa[s1, a]
            b2 = self.kd.symmetry.a_sa[s2, a]
            # Translation vectors picked up by the mapping; they must be
            # (numerically) integer lattice translations.
            S1_c = (np.dot(self.spos_ac[a], self.kd.symmetry.op_scc[s1]) -
                    self.spos_ac[b1])
            S2_c = (np.dot(self.spos_ac[a], self.kd.symmetry.op_scc[s2]) -
                    self.spos_ac[b2])
            assert abs(S1_c.round() - S1_c).max() < 1e-13
            assert abs(S2_c.round() - S2_c).max() < 1e-13
            # Bloch phases from the translations.
            x1 = np.exp(2j * pi * np.dot(k1_c, S1_c))
            x2 = np.exp(2j * pi * np.dot(k2_c, S2_c))
            # Rotate projector coefficients with the setup rotation
            # matrices and apply the phases.
            P1_i = np.dot(self.setups[a].R_sii[s1], kpt1.P_ani[b1][n1]) * x1
            P2_i = np.dot(self.setups[a].R_sii[s2], kpt2.P_ani[b2][n2]) * x2
            if t1:
                P1_i = P1_i.conj()
            if t2:
                P2_i = P2_i.conj()
            # Atomic density matrix -> multipole moments of the
            # compensation charges.
            D_ii = np.outer(P1_i.conj(), P2_i)
            D_p = pack(D_ii)
            Q_aL[a] = np.dot(D_p, self.setups[a].Delta_pL)
        self.ghat.add(nt_G, Q_aL, bzq_index)
        return nt_G
def calculate_exx_paw_correction(self):
exx = 0
deg = 2 // self.nspins # spin degeneracy
for a, D_sp in self.density.D_asp.items():
setup = self.setups[a]
for D_p in D_sp:
D_ii = unpack2(D_p)
ni = len(D_ii)
for i1 in range(ni):
for i2 in range(ni):
A = 0.0
for i3 in range(ni):
p13 = packed_index(i1, i3, ni)
for i4 in range(ni):
p24 = packed_index(i2, i4, ni)
A += setup.M_pp[p13, p24] * D_ii[i3, i4]
p12 = packed_index(i1, i2, ni)
exx -= self.hybrid / deg * D_ii[i1, i2] * A
if setup.X_p is not None:
exx -= self.hybrid * np.dot(D_p, setup.X_p)
exx += self.hybrid * setup.ExxC
return exx
    def print_initialization(self, xc):
        """Write a summary of the calculation setup to self.txt.

        xc: name of the ground-state XC functional (string).
        NOTE: uses Python 2 ``print >>`` statements.
        """
        print >> self.txt, \
            '------------------------------------------------------'
        print >> self.txt, 'Non-self-consistent HF correlation energy'
        print >> self.txt, \
            '------------------------------------------------------'
        print >> self.txt, 'Started at:  ', ctime()
        print >> self.txt
        print >> self.txt, \
            'Ground state XC functional     : %s' % xc
        print >> self.txt, \
            'Valence electrons              : %s' % self.setups.nvalence
        print >> self.txt, \
            'Number of Spins                : %s' % self.nspins
        print >> self.txt, \
            'Plane wave cutoff energy       : %4.1f eV' % (self.ecut*Ha)
        print >> self.txt, \
            'Gamma q-point excluded         : %s' % self.skip_gamma
        if not self.skip_gamma:
            # Parameters of the G=0 divergence regularization.
            print >> self.txt, \
                'Alpha parameter                : %s' % self.alpha
            print >> self.txt, \
                'Gamma parameter                : %3.3f' % self.gamma
        print >> self.txt, \
            'ACDF method                    : %s' % self.acdf
        print >> self.txt, \
            'Number of k-points             : %s' % len(self.kd.bzk_kc)
        print >> self.txt, \
            'Number of Irreducible k-points : %s' % len(self.kd.ibzk_kc)
        print >> self.txt, \
            'Number of q-points             : %s' % len(self.bzq_qc)
        if not self.qsym:
            print >> self.txt, \
                'q-point symmetry               : %s' % self.qsym
        else:
            print >> self.txt, \
                'Number of Irreducible q-points : %s' % len(self.ibzq_qc)
        print >> self.txt
        # Per-q-point weights (normalized by the number of BZ q-points).
        for q, weight in zip(self.ibzq_qc, self.q_weights):
            print >> self.txt, 'q: [%1.3f %1.3f %1.3f] - weight: %1.3f' % \
                (q[0],q[1],q[2], weight/len(self.bzq_qc))
        print >> self.txt
        print >> self.txt, \
            '------------------------------------------------------'
        print >> self.txt, \
            '------------------------------------------------------'
        print >> self.txt
        print >> self.txt, 'Looping over k-points in the full Brillouin zone'
        print >> self.txt
|
robwarm/gpaw-symm
|
gpaw/xc/hybridq.py
|
Python
|
gpl-3.0
| 16,684
|
[
"ASE",
"GPAW"
] |
fb9f35d1e9824d73ef566fb77a876378e9457ae6480d9d26fb7bd30f1e1ea750
|
#!/usr/bin/env python
# Build/packaging script for the DSI2 application: standard setuptools
# metadata plus a py2app configuration for building a Mac .app bundle.
from setuptools import setup, find_packages
from codecs import open
from os import path
# Version
MAJOR="0"
MINOR="5"
MICRO="0"
VERSION= MAJOR + "." + MINOR + "." + MICRO
here = path.abspath(path.dirname(__file__))
# NOTE(review): long_description is read here but never passed to setup().
with open(path.join(here,'README.md')) as f:
    long_description = f.read()
# Variables for py2app
# Modules py2app must bundle explicitly (its dependency scanner misses
# many of the Qt4/ETS/VTK plugin modules, hence the long explicit list).
p2a_includes=[
    'PyQt4',
    'subprocess',
    'numpy',
    'numpy.core',
    'configobj',
    'scipy',
    'traits',
    'traitsui',
    'traitsui.editors',
    'traitsui.editors.*',
    'traitsui.extras',
    'traitsui.extras.*',
    'traitsui.image',
    'traitsui.image.*',
    'traitsui.ui_traits',
    'traits.api',
    'traits.*',
    'vtk',
    'tvtk',
    'tvtk.*',
    'tvtk.tvtk_classes',
    'tvtk.pyface.*',
    'tvtk.pyface.ui.qt4',
    'tvtk.pyface.ui.qt4.*',
    'tvtk.tools',
    'tvtk.tools.*',
    'tvtk.view',
    'tvtk.view.*',
    'tvtk.plugins',
    'tvtk.plugins.*',
    'traitsui.qt4',
    'traitsui.qt4.*',
    'chaco',
    'chaco.*',
    'kiva',
    'pyface',
    'pyface.*',
    'pyface.qt4',
    'pyface.toolkit',
    'pyface.image_resource',
    'pyface.image_resource.*',
    'pyface.ui.qt4',
    'pyface.ui.qt4.init',
    'pyface.ui.qt4.*',
    'pyface.ui.qt4.grid.*',
    'pyface.ui.qt4.action.*',
    'pyface.ui.qt4.timer.*',
    'pyface.ui.qt4.wizard.*',
    'pyface.ui.qt4.workbench.*',
    'enable',
    'enable.drawing',
    'enable.tools',
    'enable.qt4',
    'enable.qt4.*',
    'enable.savage',
    'enable.savage.*',
    'enable.savage.svg',
    'enable.savage.svg.*',
    'enable.savage.svg.backends',
    #'enable.savage.svg.backends.wx',
    #'enable.savage.svg.backends.wx.*',
    'enable.savage.svg.css',
    'enable.savage.compliance',
    'enable.savage.trait_defs',
    'enable.savage.trait_defs.*',
    'enable.savage.trait_defs.ui',
    'enable.savage.trait_defs.ui.*',
    'enable.savage.trait_defs.ui.qt4',
    'enable.savage.trait_defs.ui.qt4.*',
    'matplotlib',
    #'dsi2'
    #'dsi2.*'
    #'dsi2.aggregation.segmentation'
    'sklearn',
    'sklearn.metrics',
    'sklearn.cluster',
    'sklearn.utils.lgamma',
    'sklearn.utils.sparsetools.*',
    'sklearn.neighbors.*'
]
p2a_packages=[]
# py2app bundle options (icon, Info.plist metadata, forced excludes).
p2a_options = dict(
    includes=p2a_includes,
    packages=p2a_packages,
    excludes=["/usr/local/Cellar/vtk5/5.10.1_1/lib/vtk-5.10/libvtkIOPythonD.5.10.dylib"],#anything we need to forcibly exclude?
    #resources=resources,
    argv_emulation=True,
    site_packages=True,
    #frameworks=frameworks,
    iconfile='dsi2/example_data/dsi2.icns',
    plist=dict(
        CFBundleIconFile='dsi2/example_data/dsi2.icns',
        CFBundleName = "DSI2",
        CFBundleShortVersionString = VERSION, # must be in X.X.X format
        CFBundleGetInfoString = "DSI2 "+ VERSION,
        CFBundleExecutable = "DSI2view",
        CFBundleIdentifier = "org.dsi2.DSI2view",
        CFBundleLicense = "GNU GPLv3+",
        CFBundleDocumentTypes=[{"CFBundleTypeExtensions":['*']}],
        )
    )
setup(
    app=['DSI2app.py'],
    name='DSI2',
    version=VERSION,
    description='DSI2 Toolbox',
    author='Matthew Cieslak',
    author_email='mattcieslak@gmail.com',
    license='GPLv3',
    url='https://github.com/mattcieslak/DSI2',
    packages = find_packages( exclude = ['doc', 'tests', "example_data"]),
    include_package_data = True,  # Comes from MANIFEST.in
    package_data={
        "dsi2":[
            "example_data/lausanne2008/ParcellationLausanne2008.xls",
            "example_data/lausanne2008/README.txt",
            "example_data/lausanne2008/resolution1015/resolution1015.graphml",
            "example_data/lausanne2008/resolution150/resolution150.graphml",
            "example_data/lausanne2008/resolution258/resolution258.graphml",
            "example_data/lausanne2008/resolution500/resolution500.graphml",
            "example_data/lausanne2008/resolution83/resolution83.graphml",
            "example_data/MNI152_T1_2mm_brain_mask.nii.gz",
            "example_data/MNI152_T1_2mm.nii.gz",
            "example_data/MNI_BRAIN_MASK_FLOAT.nii.gz",
            "example_data/NTU90_QA.nii.gz"
        ]},
    options = {"py2app":p2a_options},
    setup_requires = ['py2app'],
    entry_points = {
        'gui_scripts':[
            'dsi2_browse = dsi2.app_launch:browser_builder',
            'dsi2_import = dsi2.app_launch:import_data',
            'dsi2_view = dsi2.app_launch:view_tracks'
        ]
    }
)
|
mattcieslak/DSI2
|
setupApp.py
|
Python
|
gpl-3.0
| 4,592
|
[
"VTK"
] |
1410f89fea75968b7f850530a241054415e907bf0a21cb70ee5feb9d63ddd05b
|
#!/usr/bin/python3
import sys
def visited(node, path):
    """Return True if *node* already occurs in *path*."""
    return any(step == node for step in path)
class CycleFind(object):
    """Enumerate cycles of a weighted directed graph by depth-first path
    extension, starting from the given source nodes.

    graph: dict-of-dicts, graph[u][v] = weight of edge (u, v).
    source: iterable of start nodes for the search.
    cyclelimit (kwarg): optional maximum path length to explore.
    """
    def __init__(self, graph, source, **kwargs):
        self.graph = graph
        self.source = source
        self.cycles = []  # list of cycles found, each a list of nodes
        self.edges, self.weights = self.getEdges(graph)
        self.cyclelimit = kwargs.get('cyclelimit', None)
        if self.cyclelimit is not None:
            self.cyclelimit = int(self.cyclelimit)
    def run(self):
        """Search from every source node; return all cycles found."""
        # NOTE(review): 'paths' is never used.
        paths = []
        for node in self.source:
            self.findNewCycles([node])
        return self.cycles
    def getEdges(self, graph):
        """Flatten the dict-of-dicts into (edge list, edge->weight map)."""
        retval = ([], {})
        for ik, iv in graph.items():
            for jk, jv in iv.items():
                retval[0].append((ik, jk))
                retval[1][(ik, jk)] = jv
        return retval
    def findNewCycles(self, path):
        """Recursively extend *path* by one outgoing edge at a time,
        recording a cycle whenever the next node closes the path."""
        start_node = path[-1]
        next_node= None
        sub = []
        # Respect the optional depth limit.
        if self.cyclelimit is not None and \
           len(path) > self.cyclelimit:
            return
        #visit each edge and each node of each edge
        for edge in self.edges:
            node1, node2 = edge
            if node1 == start_node:
                next_node = node2
                # check if there is a chord with a shorter path
                # Walk backwards along the path accumulating edge weights;
                # if some earlier node reaches next_node directly with a
                # smaller weight, prune this extension.
                weight = self.weights[edge]
                shorter_path = False
                for i1, i2 in zip(path[-2::-1], path[::-1]):
                    weight += self.weights[(i1, i2)]
                    if (i1, next_node) in self.edges and \
                       self.weights[(i1, next_node)] < weight:
                        shorter_path = True
                        break
                if shorter_path:
                    continue
                if not visited(next_node, path):
                    # neighbor node not on path yet
                    sub = list(path)
                    sub.append(next_node)
                    # explore extended path
                    self.findNewCycles(sub);
                elif len(path) > 2 and next_node in path:
                    # cycle found
                    p = self.rotate_to_smallest(path, next_node);
                    if self.isNew(p):
                        self.cycles.append(p)
    def filter_negative(self, cycles):
        """Return the subset of *cycles* whose negated total edge weight
        is positive (i.e. cycles with negative total weight)."""
        negative_cycles = []
        for i in cycles:
            total = 0.0
            # Close the cycle so the last->first edge is included.
            path = list(i)
            path.append(i[0])
            for j in zip(path, path[1::]):
                total -= self.graph[j[0]][j[1]]
            if total > 0.0:
                negative_cycles.append(i)
        return negative_cycles
    # rotate cycle path such that it begins with the smallest node
    def rotate_to_smallest(self, path, next_node):
        """Canonicalize a cycle: drop the prefix before *next_node*, then
        rotate so it starts at a source node (preferred) or the minimum
        node, so equivalent rotations compare equal in isNew()."""
        i = path.index(next_node)
        path = path[i::]
        for i in self.source:
            if i in path:
                n = path.index(i)
                return path[n:]+path[:n]
        n = path.index(min(path))
        return path[n:]+path[:n]
    def isNew(self, path):
        """True if this canonicalized cycle has not been recorded yet."""
        return not path in self.cycles
if __name__ == '__main__':
    # Load one or more CSV graph files with CycleDetect, then enumerate
    # and print all cycles reachable from the detected origin nodes.
    from cycledetect import CycleDetect
    cd = CycleDetect()
    for i in sys.argv[1:]:
        with open(i, 'r') as csvfile:
            print("Loading: ", i)
            cd.load([csvfile])
    cf = CycleFind(cd.graph, cd.origins, cyclelimit=cd.cyclelimit)
    cycles = cf.run()
    # NOTE(review): negative_cycles is computed but never printed; the
    # loop below prints all cycles.
    negative_cycles = cf.filter_negative(cycles)
    for i in cycles:
        print (" -> " .join(i))
|
joequant/cycle-detector
|
cyclefind.py
|
Python
|
bsd-2-clause
| 3,465
|
[
"VisIt"
] |
b4ba01c15ec3fa6c25215d78fd6318dd9c0f1961f13a8d45c6a6789fc5074f9f
|
# -*- coding: utf-8 -*-
"""Functions that expose information about templates that might be
interesting for introspection.
"""
from . import nodes
from ._compat import iteritems
from ._compat import string_types
from .compiler import CodeGenerator
class TrackingCodeGenerator(CodeGenerator):
    """We abuse the code generator for introspection: the template is
    "compiled" with all output discarded, and every name that would be
    resolved from the render context is recorded."""
    def __init__(self, environment):
        CodeGenerator.__init__(self, environment, "<introspection>", "<introspection>")
        # Names that will be looked up from the context at runtime.
        self.undeclared_identifiers = set()
    def write(self, x):
        """Don't write."""
    def enter_frame(self, frame):
        """Remember all undeclared identifiers."""
        CodeGenerator.enter_frame(self, frame)
        # A "resolve" load means the name comes from the context; names
        # present in the environment globals are not counted.
        for _, (action, param) in iteritems(frame.symbols.loads):
            if action == "resolve" and param not in self.environment.globals:
                self.undeclared_identifiers.add(param)
def find_undeclared_variables(ast):
    """Returns a set of all variables in the AST that will be looked up from
    the context at runtime.  Because at compile time it's not known which
    variables will be used depending on the path the execution takes at
    runtime, all variables are returned.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
    >>> meta.find_undeclared_variables(ast) == set(['bar'])
    True

    .. admonition:: Implementation

       Internally the code generator is used for finding undeclared
       variables.  This is good to know because the code generator might
       raise a :exc:`TemplateAssertionError` during compilation, and as a
       matter of fact this function can currently raise that exception too.
    """
    tracker = TrackingCodeGenerator(ast.environment)
    tracker.visit(ast)
    return tracker.undeclared_identifiers
def find_referenced_templates(ast):
    """Finds all the referenced templates from the AST. This will return an
    iterator over all the hardcoded template extensions, inclusions and
    imports. If dynamic inheritance or inclusion is used, `None` will be
    yielded.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
    >>> list(meta.find_referenced_templates(ast))
    ['layout.html', None]

    This function is useful for dependency tracking. For example if you want
    to rebuild parts of the website after a layout template has changed.
    """
    # Only these node types can reference other templates.
    for node in ast.find_all(
        (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
    ):
        if not isinstance(node.template, nodes.Const):
            # a tuple with some non consts in there
            if isinstance(node.template, (nodes.Tuple, nodes.List)):
                for template_name in node.template.items:
                    # something const, only yield the strings and ignore
                    # non-string consts that really just make no sense
                    if isinstance(template_name, nodes.Const):
                        if isinstance(template_name.value, string_types):
                            yield template_name.value
                    # something dynamic in there
                    else:
                        yield None
            # something dynamic we don't know about here
            else:
                yield None
            # non-const template expression fully handled above.
            continue
        # constant is a basestring, direct template name
        if isinstance(node.template.value, string_types):
            yield node.template.value
        # a tuple or list (latter *should* not happen) made of consts,
        # yield the consts that are strings. We could warn here for
        # non string values
        elif isinstance(node, nodes.Include) and isinstance(
            node.template.value, (tuple, list)
        ):
            for template_name in node.template.value:
                if isinstance(template_name, string_types):
                    yield template_name
        # something else we don't care about, we could warn here
        else:
            yield None
|
sserrot/champion_relationships
|
venv/Lib/site-packages/jinja2/meta.py
|
Python
|
mit
| 4,131
|
[
"VisIt"
] |
423c9885f3510f74025e3049a623e5f4a82412419725b024094ab8f94af5d044
|
# Copyright (C) 2016 Collin Capano, Christopher M. Biwer, Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This modules provides classes and functions for drawing and calculating the
probability density function of distributions.
"""
# imports needed for functions below
from pycbc.workflow import ConfigParser as _ConfigParser
from pycbc.distributions import constraints
from pycbc import VARARGS_DELIM as _VARARGS_DELIM
# Promote some classes/functions to the distributions name space
from pycbc.distributions.angular import UniformAngle, SinAngle, CosAngle, \
UniformSolidAngle
from pycbc.distributions.arbitrary import Arbitrary, FromFile
from pycbc.distributions.gaussian import Gaussian
from pycbc.distributions.power_law import UniformPowerLaw, UniformRadius
from pycbc.distributions.sky_location import UniformSky
from pycbc.distributions.uniform import Uniform
from pycbc.distributions.uniform_log import UniformLog10
from pycbc.distributions.spins import IndependentChiPChiEff
from pycbc.distributions.qnm import UniformF0Tau
from pycbc.distributions.joint import JointDistribution
# A dict of all available distributions, keyed by each class's ``name``
# attribute; used to look distributions up by the "name" option when
# parsing config files (see read_distributions_from_config below).
distribs = {
    IndependentChiPChiEff.name : IndependentChiPChiEff,
    Arbitrary.name : Arbitrary,
    FromFile.name : FromFile,
    Gaussian.name : Gaussian,
    UniformPowerLaw.name : UniformPowerLaw,
    UniformRadius.name : UniformRadius,
    Uniform.name : Uniform,
    UniformAngle.name : UniformAngle,
    CosAngle.name : CosAngle,
    SinAngle.name : SinAngle,
    UniformSolidAngle.name : UniformSolidAngle,
    UniformSky.name : UniformSky,
    UniformLog10.name : UniformLog10,
    UniformF0Tau.name : UniformF0Tau,
}
def read_distributions_from_config(cp, section="prior"):
    """Returns a list of PyCBC distribution instances for a section in the
    given configuration file.

    Parameters
    ----------
    cp : WorflowConfigParser
        An open config file to read.
    section : {"prior", string}
        Prefix on section names from which to retrieve the distributions.

    Returns
    -------
    list
        A list of the parsed distributions.

    Raises
    ------
    ValueError
        If the same parameter appears in more than one distribution.
    """
    dists = []
    seen_params = []
    for subsection in cp.get_subsections(section):
        name = cp.get_opt_tag(section, "name", subsection)
        dist = distribs[name].from_config(cp, section, subsection)
        # Each parameter may be governed by exactly one distribution.
        if not set(dist.params).isdisjoint(seen_params):
            raise ValueError("Same parameter in more than one distribution.")
        dists.append(dist)
        seen_params += dist.params
    return dists
def _convert_liststring_to_list(lstring):
"""Checks if an argument of the configuration file is a string of a list
and returns the corresponding list (of strings).
The argument is considered to be a list if it starts with '[' and ends
with ']'. List elements should be comma separated. For example, passing
`'[foo bar, cat]'` will result in `['foo bar', 'cat']` being returned. If
the argument does not start and end with '[' and ']', the argument will
just be returned as is.
"""
if lstring[0]=='[' and lstring[-1]==']':
lstring = [str(lstring[1:-1].split(',')[n].strip().strip("'"))
for n in range(len(lstring[1:-1].split(',')))]
return lstring
def read_params_from_config(cp, prior_section='prior',
                            vargs_section='variable_args',
                            sargs_section='static_args'):
    """Loads static and variable parameters from a configuration file.

    Parameters
    ----------
    cp : WorkflowConfigParser
        An open config parser to read from.
    prior_section : str, optional
        Check that priors exist in the given section. Default is 'prior.'
    vargs_section : str, optional
        The section to get the parameters that will be varied/need priors
        defined for them. Default is 'variable_args'.
    sargs_section : str, optional
        The section to get the parameters that will remain fixed. Default is
        'static_args'.

    Returns
    -------
    variable_args : list
        The names of the parameters to vary in the PE run.
    static_args : dict
        Dictionary of names -> values giving the parameters to keep fixed.
    """
    variable_args = cp.options(vargs_section)
    # Parameters covered by prior subsections ("a+b" tags cover both a and b).
    covered = set()
    for tag in cp.get_subsections(prior_section):
        covered.update(tag.split('+'))
    # Every variable parameter must have a prior...
    missing_prior = set(variable_args) - covered
    if any(missing_prior):
        raise KeyError("You are missing a priors section in the config file "
                       "for parameter(s): {}".format(', '.join(missing_prior)))
    # ...and every prior must correspond to a variable parameter.
    missing_variable = covered - set(variable_args)
    if any(missing_variable):
        raise KeyError("Prior section found for parameter(s) {} but not "
                       "listed as variable parameter(s)."
                       .format(', '.join(missing_variable)))
    # Fixed parameters; an absent section just means there are none.
    try:
        raw_static = {key: cp.get_opt_tags(sargs_section, key, [])
                      for key in cp.options(sargs_section)}
    except _ConfigParser.NoSectionError:
        raw_static = {}

    def _coerce(val):
        # Floats stay floats; anything else may be a list-string.
        try:
            return float(val)
        except ValueError:
            return _convert_liststring_to_list(val)

    static_args = {key: _coerce(val) for key, val in raw_static.items()}
    return variable_args, static_args
def read_constraints_from_config(cp, transforms=None,
                                 constraint_section='constraint'):
    """Loads parameter constraints from a configuration file.

    Parameters
    ----------
    cp : WorkflowConfigParser
        An open config parser to read from.
    transforms : list, optional
        List of transforms to apply to parameters before applying constraints.
    constraint_section : str, optional
        The section to get the constraints from. Default is 'constraint'.

    Returns
    -------
    list
        List of ``Constraint`` objects. Empty if no constraints were provided.
    """
    cons = []
    for subsection in cp.get_subsections(constraint_section):
        name = cp.get_opt_tag(constraint_section, "name", subsection)
        constraint_arg = cp.get_opt_tag(
            constraint_section, "constraint_arg", subsection)
        # Remaining options in the subsection become keyword arguments.
        kwargs = {}
        section = constraint_section + "-" + subsection
        for key in cp.options(section):
            if key in ("name", "constraint_arg"):
                continue
            val = cp.get(section, key)
            if key == "required_parameters":
                kwargs[key] = val.split(_VARARGS_DELIM)
                continue
            # Numeric-looking values become floats; others stay strings.
            try:
                kwargs[key] = float(val)
            except ValueError:
                kwargs[key] = val
        cons.append(constraints.constraints[name](
            constraint_arg, transforms=transforms, **kwargs))
    return cons
|
cmbiwer/pycbc
|
pycbc/distributions/__init__.py
|
Python
|
gpl-3.0
| 8,268
|
[
"Gaussian"
] |
fbf12f066c0325b607a45733abc4c056021899f1b3e6b3375aa252fbb68f40dc
|
"""RabbitMQAdmin module serves for the management of the internal RabbitMQ
users database. It uses rabbitmqctl command. Only the user with the right
permissions can execute those commands.
"""
import re
from DIRAC import S_OK, S_ERROR
import errno
from DIRAC.Core.Utilities import Subprocess
def executeRabbitmqctl(arg, *argv):
    """Executes RabbitMQ administration command.

    It uses the rabbitmqctl command line interface. For every command the
    -q argument ("quiet mode") is used, since in some cases the output must
    be processed, so we don't want any additional information printed.

    Args:
      arg(str): command recognized by rabbitmqctl.
      argv: optional list of string parameters.
    :rtype: S_OK or S_ERROR
    :type argv: python:list
    """
    command = ["sudo", "/usr/sbin/rabbitmqctl", "-q", arg]
    command.extend(argv)
    result = Subprocess.systemCall(timeout=30, cmdSeq=command)
    if not result["OK"]:
        return S_ERROR(errno.EPERM, "%r failed to launch" % command)
    errorcode, cmd_out, cmd_err = result["Value"]
    if not errorcode:
        return S_OK(cmd_out)
    # No idea what errno code should be used here.
    # Maybe we should define some specific for rabbitmqctl
    return S_ERROR(
        errno.EPERM, "%r failed, status code: %s stdout: %r stderr: %r" % (command, errorcode, cmd_out, cmd_err)
    )
def addUserWithoutPassword(user):
    """Adds *user* to the internal RabbitMQ database and clears its
    password.

    This should be done for all users that will be using SSL
    authentication: they do not need any password.
    """
    ret = addUser(user)
    if ret["OK"]:
        return clearUserPassword(user)
    return ret
def addUser(user, password="password"):
    """Adds user to the internal RabbitMQ database

    Function also sets user password.
    User still cannot access to any resources, without
    having permissions set (see setUserPermission).

    :param user: user name to create
    :param password: initial password (default "password")
    :rtype: S_OK or S_ERROR
    """
    return executeRabbitmqctl("add_user", user, password)
def deleteUser(user):
    """Removes the user from the internal RabbitMQ database.

    :rtype: S_OK or S_ERROR
    """
    return executeRabbitmqctl("delete_user", user)
def getAllUsers():
    """Returns all existing users in the internal RabbitMQ database.

    :returns: S_OK with a list of all users
    :rtype: S_OK
    """
    ret = executeRabbitmqctl("list_users")
    if not ret["OK"]:
        return ret
    users = ret["Value"]
    users = users.split("\n")
    # rabbitmqctl -q list_users prints one "user<TAB>[tags]" entry per
    # line; strip the tag column.  Only non-empty lines are processed,
    # since every trailing newline would otherwise produce an empty user.
    # BUGFIX: the original pattern r"\\t\[\w*\]$" matched a literal
    # backslash followed by 't' instead of a TAB character, so the tag
    # column was never removed.
    users = [re.sub(r"\t\[\w*\]$", "", u) for u in users if u]
    return S_OK(users)
def setUserPermission(user):
    """Grants *user* full configure/write/read permissions on the default
    virtual host "/".

    :rtype: S_OK or S_ERROR
    """
    return executeRabbitmqctl("set_permissions", "-p", "/", user, '".*"', '".*"', '".*"')
def clearUserPassword(user):
    """Clears users password for the internal RabbitMQ
    database. User with no password cannot enter
    the RabbitMQ website interface but still can
    connect via SSL if given permission.

    :rtype: S_OK or S_ERROR
    """
    return executeRabbitmqctl("clear_password", user)
def setUsersPermissions(users):
    """Grants full "/" vhost permissions to every user in *users*.

    :returns: S_OK with 'Successful' and 'Failed' result dictionaries
    """
    successful, failed = {}, {}
    for user in users:
        ret = setUserPermission(user)
        if not ret["OK"]:
            print("Problem with permissions:%s" % ret["Message"])
            failed[user] = "Permission not set because of:%s" % ret["Message"]
        else:
            successful[user] = ret["Value"]
    return S_OK({"Successful": successful, "Failed": failed})
def addUsersWithoutPasswords(users):
    """Adds every user in *users* with a cleared password (for SSL
    authentication).

    :returns: S_OK with 'Successful' and 'Failed' result dictionaries
    """
    successful, failed = {}, {}
    for user in users:
        ret = addUserWithoutPassword(user)
        if not ret["OK"]:
            print("Problem with adding user:%s" % ret["Message"])
            failed[user] = "User not added"
        else:
            successful[user] = ret["Value"]
    return S_OK({"Successful": successful, "Failed": failed})
def addUsers(users):
    """Adds users to the RabbitMQ internal database.

    :returns: S_OK with 'Successful' and 'Failed' result dictionaries
    """
    successful, failed = {}, {}
    for user in users:
        ret = addUser(user)
        if not ret["OK"]:
            print("Problem with adding user:%s" % ret["Message"])
            failed[user] = "User not added"
        else:
            successful[user] = ret["Value"]
    return S_OK({"Successful": successful, "Failed": failed})
def deleteUsers(users):
    """Deletes users from the RabbitMQ internal database.

    :returns: S_OK with 'Successful' and 'Failed' result dictionaries
    """
    successful = {}
    failed = {}
    for u in users:
        ret = deleteUser(u)
        if ret["OK"]:
            successful[u] = ret["Value"]
        else:
            # BUGFIX: messages previously said "adding user"/"User not
            # added" — copy-pasted from addUsers().
            print("Problem with deleting user:%s" % ret["Message"])
            failed[u] = "User not deleted"
    return S_OK({"Successful": successful, "Failed": failed})
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Utilities/RabbitMQAdmin.py
|
Python
|
gpl-3.0
| 4,820
|
[
"DIRAC"
] |
606ae4ede36d26c98c84287e6017c69379419dcbab91b3159d5784d1ed3a0c20
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import numpy as np
from .elastic import *
from .tensors import *
from .stress import *
from .strain import *
|
matk86/pymatgen
|
pymatgen/analysis/elasticity/__init__.py
|
Python
|
mit
| 220
|
[
"pymatgen"
] |
d1d7c21f6463e7211fcebcf9270420634a81a67cc124418a4af6fbff66ed9a0e
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
emoji_table = {
':smile:': u'😄',
':smiley:': u'😃',
':grinning:': u'😀',
':blush:': u'😊',
':relaxed:': u'☺️',
':wink:': u'😉',
':heart_eyes:': u'😍',
':kissing_heart:': u'😘',
':kissing_closed_eyes:': u'😚',
':kissing:': u'😗',
':kissing_smiling_eyes:': u'😙',
':stuck_out_tongue_winking_eye:': u'😜',
':stuck_out_tongue_closed_eyes:': u'😝',
':stuck_out_tongue:': u'😛',
':flushed:': u'😳',
':grin:': u'😁',
':pensive:': u'😔',
':relieved:': u'😌',
':unamused:': u'😒',
':disappointed:': u'😞',
':persevere:': u'😣',
':cry:': u'😢',
':joy:': u'😂',
':sob:': u'😭',
':sleepy:': u'😪',
':disappointed_relieved:': u'😥',
':cold_sweat:': u'😰',
':sweat_smile:': u'😅',
':sweat:': u'😓',
':weary:': u'😩',
':tired_face:': u'😫',
':fearful:': u'😨',
':scream:': u'😱',
':angry:': u'😠',
':rage:': u'😡',
':triumph:': u'😤',
':confounded:': u'😖',
':laughing:': u'😆',
':satisfied:': u'😆',
':yum:': u'😋',
':mask:': u'😷',
':sunglasses:': u'😎',
':sleeping:': u'😴',
':dizzy_face:': u'😵',
':astonished:': u'😲',
':worried:': u'😟',
':frowning:': u'😦',
':anguished:': u'😧',
':smiling_imp:': u'😈',
':imp:': u'👿',
':open_mouth:': u'😮',
':grimacing:': u'😬',
':neutral_face:': u'😐',
':confused:': u'😕',
':hushed:': u'😯',
':no_mouth:': u'😶',
':innocent:': u'😇',
':smirk:': u'😏',
':expressionless:': u'😑',
':man_with_gua_pi_mao:': u'👲',
':man_with_turban:': u'👳',
':cop:': u'👮',
':construction_worker:': u'👷',
':guardsman:': u'💂',
':baby:': u'👶',
':boy:': u'👦',
':girl:': u'👧',
':man:': u'👨',
':woman:': u'👩',
':older_man:': u'👴',
':older_woman:': u'👵',
':person_with_blond_hair:': u'👱',
':angel:': u'👼',
':princess:': u'👸',
':smiley_cat:': u'😺',
':smile_cat:': u'😸',
':heart_eyes_cat:': u'😻',
':kissing_cat:': u'😽',
':smirk_cat:': u'😼',
':scream_cat:': u'🙀',
':crying_cat_face:': u'😿',
':joy_cat:': u'😹',
':pouting_cat:': u'😾',
':japanese_ogre:': u'👹',
':japanese_goblin:': u'👺',
':see_no_evil:': u'🙈',
':hear_no_evil:': u'🙉',
':speak_no_evil:': u'🙊',
':skull:': u'💀',
':alien:': u'👽',
':hankey:': u'💩',
':poop:': u'💩',
':shit:': u'💩',
':fire:': u'🔥',
':sparkles:': u'✨',
':star2:': u'🌟',
':dizzy:': u'💫',
':boom:': u'💥',
':collision:': u'💥',
':anger:': u'💢',
':sweat_drops:': u'💦',
':droplet:': u'💧',
':zzz:': u'💤',
':dash:': u'💨',
':ear:': u'👂',
':eyes:': u'👀',
':nose:': u'👃',
':tongue:': u'👅',
':lips:': u'👄',
':+1:': u'👍',
':thumbsup:': u'👍',
':-1:': u'👎',
':thumbsdown:': u'👎',
':ok_hand:': u'👌',
':facepunch:': u'👊',
':punch:': u'👊',
':fist:': u'✊',
':v:': u'✌️',
':wave:': u'👋',
':hand:': u'✋',
':raised_hand:': u'✋',
':open_hands:': u'👐',
':point_up_2:': u'👆',
':point_down:': u'👇',
':point_right:': u'👉',
':point_left:': u'👈',
':raised_hands:': u'🙌',
':pray:': u'🙏',
':point_up:': u'☝️',
':clap:': u'👏',
':muscle:': u'💪',
':walking:': u'🚶',
':runner:': u'🏃',
':running:': u'🏃',
':dancer:': u'💃',
':couple:': u'👫',
':family:': u'👪',
':two_men_holding_hands:': u'👬',
':two_women_holding_hands:': u'👭',
':couplekiss:': u'💏',
':couple_with_heart:': u'💑',
':dancers:': u'👯',
':ok_woman:': u'🙆',
':no_good:': u'🙅',
':information_desk_person:': u'💁',
':raising_hand:': u'🙋',
':massage:': u'💆',
':haircut:': u'💇',
':nail_care:': u'💅',
':bride_with_veil:': u'👰',
':person_with_pouting_face:': u'🙎',
':person_frowning:': u'🙍',
':bow:': u'🙇',
':tophat:': u'🎩',
':crown:': u'👑',
':womans_hat:': u'👒',
':athletic_shoe:': u'👟',
':mans_shoe:': u'👞',
':shoe:': u'👞',
':sandal:': u'👡',
':high_heel:': u'👠',
':boot:': u'👢',
':shirt:': u'👕',
':tshirt:': u'👕',
':necktie:': u'👔',
':womans_clothes:': u'👚',
':dress:': u'👗',
':running_shirt_with_sash:': u'🎽',
':jeans:': u'👖',
':kimono:': u'👘',
':bikini:': u'👙',
':briefcase:': u'💼',
':handbag:': u'👜',
':pouch:': u'👝',
':purse:': u'👛',
':eyeglasses:': u'👓',
':ribbon:': u'🎀',
':closed_umbrella:': u'🌂',
':lipstick:': u'💄',
':yellow_heart:': u'💛',
':blue_heart:': u'💙',
':purple_heart:': u'💜',
':green_heart:': u'💚',
':heart:': u'❤️',
':broken_heart:': u'💔',
':heartpulse:': u'💗',
':heartbeat:': u'💓',
':two_hearts:': u'💕',
':sparkling_heart:': u'💖',
':revolving_hearts:': u'💞',
':cupid:': u'💘',
':love_letter:': u'💌',
':kiss:': u'💋',
':ring:': u'💍',
':gem:': u'💎',
':bust_in_silhouette:': u'👤',
':busts_in_silhouette:': u'👥',
':speech_balloon:': u'💬',
':footprints:': u'👣',
':thought_balloon:': u'💭',
':dog:': u'🐶',
':wolf:': u'🐺',
':cat:': u'🐱',
':mouse:': u'🐭',
':hamster:': u'🐹',
':rabbit:': u'🐰',
':frog:': u'🐸',
':tiger:': u'🐯',
':koala:': u'🐨',
':bear:': u'🐻',
':pig:': u'🐷',
':pig_nose:': u'🐽',
':cow:': u'🐮',
':boar:': u'🐗',
':monkey_face:': u'🐵',
':monkey:': u'🐒',
':horse:': u'🐴',
':sheep:': u'🐑',
':elephant:': u'🐘',
':panda_face:': u'🐼',
':penguin:': u'🐧',
':bird:': u'🐦',
':baby_chick:': u'🐤',
':hatched_chick:': u'🐥',
':hatching_chick:': u'🐣',
':chicken:': u'🐔',
':snake:': u'🐍',
':turtle:': u'🐢',
':bug:': u'🐛',
':bee:': u'🐝',
':honeybee:': u'🐝',
':ant:': u'🐜',
':beetle:': u'🐞',
':snail:': u'🐌',
':octopus:': u'🐙',
':shell:': u'🐚',
':tropical_fish:': u'🐠',
':fish:': u'🐟',
':dolphin:': u'🐬',
':flipper:': u'🐬',
':whale:': u'🐳',
':whale2:': u'🐋',
':cow2:': u'🐄',
':ram:': u'🐏',
':rat:': u'🐀',
':water_buffalo:': u'🐃',
':tiger2:': u'🐅',
':rabbit2:': u'🐇',
':dragon:': u'🐉',
':racehorse:': u'🐎',
':goat:': u'🐐',
':rooster:': u'🐓',
':dog2:': u'🐕',
':pig2:': u'🐖',
':mouse2:': u'🐁',
':ox:': u'🐂',
':dragon_face:': u'🐲',
':blowfish:': u'🐡',
':crocodile:': u'🐊',
':camel:': u'🐫',
':dromedary_camel:': u'🐪',
':leopard:': u'🐆',
':cat2:': u'🐈',
':poodle:': u'🐩',
':feet:': u'🐾',
':paw_prints:': u'🐾',
':bouquet:': u'💐',
':cherry_blossom:': u'🌸',
':tulip:': u'🌷',
':four_leaf_clover:': u'🍀',
':rose:': u'🌹',
':sunflower:': u'🌻',
':hibiscus:': u'🌺',
':maple_leaf:': u'🍁',
':leaves:': u'🍃',
':fallen_leaf:': u'🍂',
':herb:': u'🌿',
':ear_of_rice:': u'🌾',
':mushroom:': u'🍄',
':cactus:': u'🌵',
':palm_tree:': u'🌴',
':evergreen_tree:': u'🌲',
':deciduous_tree:': u'🌳',
':chestnut:': u'🌰',
':seedling:': u'🌱',
':blossom:': u'🌼',
':globe_with_meridians:': u'🌐',
':sun_with_face:': u'🌞',
':full_moon_with_face:': u'🌝',
':new_moon_with_face:': u'🌚',
':new_moon:': u'🌑',
':waxing_crescent_moon:': u'🌒',
':first_quarter_moon:': u'🌓',
':moon:': u'🌔',
':waxing_gibbous_moon:': u'🌔',
':full_moon:': u'🌕',
':waning_gibbous_moon:': u'🌖',
':last_quarter_moon:': u'🌗',
':waning_crescent_moon:': u'🌘',
':last_quarter_moon_with_face:': u'🌜',
':first_quarter_moon_with_face:': u'🌛',
':crescent_moon:': u'🌙',
':earth_africa:': u'🌍',
':earth_americas:': u'🌎',
':earth_asia:': u'🌏',
':volcano:': u'🌋',
':milky_way:': u'🌌',
':stars:': u'🌠',
':star:': u'⭐',
':sunny:': u'☀️',
':partly_sunny:': u'⛅',
':cloud:': u'☁️',
':zap:': u'⚡',
':umbrella:': u'☔',
':snowflake:': u'❄️',
':snowman:': u'⛄',
':cyclone:': u'🌀',
':foggy:': u'🌁',
':rainbow:': u'🌈',
':ocean:': u'🌊',
':bamboo:': u'🎍',
':gift_heart:': u'💝',
':dolls:': u'🎎',
':school_satchel:': u'🎒',
':mortar_board:': u'🎓',
':flags:': u'🎏',
':fireworks:': u'🎆',
':sparkler:': u'🎇',
':wind_chime:': u'🎐',
':rice_scene:': u'🎑',
':jack_o_lantern:': u'🎃',
':ghost:': u'👻',
':santa:': u'🎅',
':christmas_tree:': u'🎄',
':gift:': u'🎁',
':tanabata_tree:': u'🎋',
':tada:': u'🎉',
':confetti_ball:': u'🎊',
':balloon:': u'🎈',
':crossed_flags:': u'🎌',
':crystal_ball:': u'🔮',
':movie_camera:': u'🎥',
':camera:': u'📷',
':video_camera:': u'📹',
':vhs:': u'📼',
':cd:': u'💿',
':dvd:': u'📀',
':minidisc:': u'💽',
':floppy_disk:': u'💾',
':computer:': u'💻',
':iphone:': u'📱',
':phone:': u'☎️',
':telephone:': u'☎️',
':telephone_receiver:': u'📞',
':pager:': u'📟',
':fax:': u'📠',
':satellite:': u'📡',
':tv:': u'📺',
':radio:': u'📻',
':loud_sound:': u'🔊',
':sound:': u'🔉',
':speaker:': u'🔈',
':mute:': u'🔇',
':bell:': u'🔔',
':no_bell:': u'🔕',
':loudspeaker:': u'📢',
':mega:': u'📣',
':hourglass_flowing_sand:': u'⏳',
':hourglass:': u'⌛',
':alarm_clock:': u'⏰',
':watch:': u'⌚',
':unlock:': u'🔓',
':lock:': u'🔒',
':lock_with_ink_pen:': u'🔏',
':closed_lock_with_key:': u'🔐',
':key:': u'🔑',
':mag_right:': u'🔎',
':bulb:': u'💡',
':flashlight:': u'🔦',
':high_brightness:': u'🔆',
':low_brightness:': u'🔅',
':electric_plug:': u'🔌',
':battery:': u'🔋',
':mag:': u'🔍',
':bathtub:': u'🛁',
':bath:': u'🛀',
':shower:': u'🚿',
':toilet:': u'🚽',
':wrench:': u'🔧',
':nut_and_bolt:': u'🔩',
':hammer:': u'🔨',
':door:': u'🚪',
':smoking:': u'🚬',
':bomb:': u'💣',
':gun:': u'🔫',
':hocho:': u'🔪',
':knife:': u'🔪',
':pill:': u'💊',
':syringe:': u'💉',
':moneybag:': u'💰',
':yen:': u'💴',
':dollar:': u'💵',
':pound:': u'💷',
':euro:': u'💶',
':credit_card:': u'💳',
':money_with_wings:': u'💸',
':calling:': u'📲',
':e-mail:': u'📧',
':inbox_tray:': u'📥',
':outbox_tray:': u'📤',
':email:': u'✉️',
':envelope:': u'✉️',
':envelope_with_arrow:': u'📩',
':incoming_envelope:': u'📨',
':postal_horn:': u'📯',
':mailbox:': u'📫',
':mailbox_closed:': u'📪',
':mailbox_with_mail:': u'📬',
':mailbox_with_no_mail:': u'📭',
':postbox:': u'📮',
':package:': u'📦',
':memo:': u'📝',
':pencil:': u'📝',
':page_facing_up:': u'📄',
':page_with_curl:': u'📃',
':bookmark_tabs:': u'📑',
':bar_chart:': u'📊',
':chart_with_upwards_trend:': u'📈',
':chart_with_downwards_trend:': u'📉',
':scroll:': u'📜',
':clipboard:': u'📋',
':date:': u'📅',
':calendar:': u'📆',
':card_index:': u'📇',
':file_folder:': u'📁',
':open_file_folder:': u'📂',
':scissors:': u'✂️',
':pushpin:': u'📌',
':paperclip:': u'📎',
':black_nib:': u'✒️',
':pencil2:': u'✏️',
':straight_ruler:': u'📏',
':triangular_ruler:': u'📐',
':closed_book:': u'📕',
':green_book:': u'📗',
':blue_book:': u'📘',
':orange_book:': u'📙',
':notebook:': u'📓',
':notebook_with_decorative_cover:': u'📔',
':ledger:': u'📒',
':books:': u'📚',
':book:': u'📖',
':open_book:': u'📖',
':bookmark:': u'🔖',
':name_badge:': u'📛',
':microscope:': u'🔬',
':telescope:': u'🔭',
':newspaper:': u'📰',
':art:': u'🎨',
':clapper:': u'🎬',
':microphone:': u'🎤',
':headphones:': u'🎧',
':musical_score:': u'🎼',
':musical_note:': u'🎵',
':notes:': u'🎶',
':musical_keyboard:': u'🎹',
':violin:': u'🎻',
':trumpet:': u'🎺',
':saxophone:': u'🎷',
':guitar:': u'🎸',
':space_invader:': u'👾',
':video_game:': u'🎮',
':black_joker:': u'🃏',
':flower_playing_cards:': u'🎴',
':mahjong:': u'🀄',
':game_die:': u'🎲',
':dart:': u'🎯',
':football:': u'🏈',
':basketball:': u'🏀',
':soccer:': u'⚽',
':baseball:': u'⚾️',
':tennis:': u'🎾',
':8ball:': u'🎱',
':rugby_football:': u'🏉',
':bowling:': u'🎳',
':golf:': u'⛳',
':mountain_bicyclist:': u'🚵',
':bicyclist:': u'🚴',
':checkered_flag:': u'🏁',
':horse_racing:': u'🏇',
':trophy:': u'🏆',
':ski:': u'🎿',
':snowboarder:': u'🏂',
':swimmer:': u'🏊',
':surfer:': u'🏄',
':fishing_pole_and_fish:': u'🎣',
':coffee:': u'☕',
':tea:': u'🍵',
':sake:': u'🍶',
':baby_bottle:': u'🍼',
':beer:': u'🍺',
':beers:': u'🍻',
':cocktail:': u'🍸',
':tropical_drink:': u'🍹',
':wine_glass:': u'🍷',
':fork_and_knife:': u'🍴',
':pizza:': u'🍕',
':hamburger:': u'🍔',
':fries:': u'🍟',
':poultry_leg:': u'🍗',
':meat_on_bone:': u'🍖',
':spaghetti:': u'🍝',
':curry:': u'🍛',
':fried_shrimp:': u'🍤',
':bento:': u'🍱',
':sushi:': u'🍣',
':fish_cake:': u'🍥',
':rice_ball:': u'🍙',
':rice_cracker:': u'🍘',
':rice:': u'🍚',
':ramen:': u'🍜',
':stew:': u'🍲',
':oden:': u'🍢',
':dango:': u'🍡',
':egg:': u'🍳',
':bread:': u'🍞',
':doughnut:': u'🍩',
':custard:': u'🍮',
':icecream:': u'🍦',
':ice_cream:': u'🍨',
':shaved_ice:': u'🍧',
':birthday:': u'🎂',
':cake:': u'🍰',
':cookie:': u'🍪',
':chocolate_bar:': u'🍫',
':candy:': u'🍬',
':lollipop:': u'🍭',
':honey_pot:': u'🍯',
':apple:': u'🍎',
':green_apple:': u'🍏',
':tangerine:': u'🍊',
':lemon:': u'🍋',
':cherries:': u'🍒',
':grapes:': u'🍇',
':watermelon:': u'🍉',
':strawberry:': u'🍓',
':peach:': u'🍑',
':melon:': u'🍈',
':banana:': u'🍌',
':pear:': u'🍐',
':pineapple:': u'🍍',
':sweet_potato:': u'🍠',
':eggplant:': u'🍆',
':tomato:': u'🍅',
':corn:': u'🌽',
':house:': u'🏠',
':house_with_garden:': u'🏡',
':school:': u'🏫',
':office:': u'🏢',
':post_office:': u'🏣',
':hospital:': u'🏥',
':bank:': u'🏦',
':convenience_store:': u'🏪',
':love_hotel:': u'🏩',
':hotel:': u'🏨',
':wedding:': u'💒',
':church:': u'⛪',
':department_store:': u'🏬',
':european_post_office:': u'🏤',
':city_sunrise:': u'🌇',
':city_sunset:': u'🌆',
':japanese_castle:': u'🏯',
':european_castle:': u'🏰',
':tent:': u'⛺',
':factory:': u'🏭',
':tokyo_tower:': u'🗼',
':japan:': u'🗾',
':mount_fuji:': u'🗻',
':sunrise_over_mountains:': u'🌄',
':sunrise:': u'🌅',
':night_with_stars:': u'🌃',
':statue_of_liberty:': u'🗽',
':bridge_at_night:': u'🌉',
':carousel_horse:': u'🎠',
':ferris_wheel:': u'🎡',
':fountain:': u'⛲',
':roller_coaster:': u'🎢',
':ship:': u'🚢',
':boat:': u'⛵',
':sailboat:': u'⛵',
':speedboat:': u'🚤',
':rowboat:': u'🚣',
':anchor:': u'⚓',
':rocket:': u'🚀',
':airplane:': u'✈️',
':seat:': u'💺',
':helicopter:': u'🚁',
':steam_locomotive:': u'🚂',
':tram:': u'🚊',
':station:': u'🚉',
':mountain_railway:': u'🚞',
':train2:': u'🚆',
':bullettrain_side:': u'🚄',
':bullettrain_front:': u'🚅',
':light_rail:': u'🚈',
':metro:': u'🚇',
':monorail:': u'🚝',
':train:': u'🚋',
':railway_car:': u'🚃',
':trolleybus:': u'🚎',
':bus:': u'🚌',
':oncoming_bus:': u'🚍',
':blue_car:': u'🚙',
':oncoming_automobile:': u'🚘',
':car:': u'🚗',
':red_car:': u'🚗',
':taxi:': u'🚕',
':oncoming_taxi:': u'🚖',
':articulated_lorry:': u'🚛',
':truck:': u'🚚',
':rotating_light:': u'🚨',
':police_car:': u'🚓',
':oncoming_police_car:': u'🚔',
':fire_engine:': u'🚒',
':ambulance:': u'🚑',
':minibus:': u'🚐',
':bike:': u'🚲',
':aerial_tramway:': u'🚡',
':suspension_railway:': u'🚟',
':mountain_cableway:': u'🚠',
':tractor:': u'🚜',
':barber:': u'💈',
':busstop:': u'🚏',
':ticket:': u'🎫',
':vertical_traffic_light:': u'🚦',
':traffic_light:': u'🚥',
':warning:': u'⚠️',
':construction:': u'🚧',
':beginner:': u'🔰',
':fuelpump:': u'⛽',
':izakaya_lantern:': u'🏮',
':lantern:': u'🏮',
':slot_machine:': u'🎰',
':hotsprings:': u'♨️',
':moyai:': u'🗿',
':circus_tent:': u'🎪',
':performing_arts:': u'🎭',
':round_pushpin:': u'📍',
':triangular_flag_on_post:': u'🚩',
':jp:': u'🇯🇵',
':kr:': u'🇰🇷',
':de:': u'🇩🇪',
':cn:': u'🇨🇳',
':us:': u'🇺🇸',
':fr:': u'🇫🇷',
':es:': u'🇪🇸',
':it:': u'🇮🇹',
':ru:': u'🇷🇺',
':gb:': u'🇬🇧',
':uk:': u'🇬🇧',
':one:': u'1️⃣',
':two:': u'2️⃣',
':three:': u'3️⃣',
':four:': u'4️⃣',
':five:': u'5️⃣',
':six:': u'6️⃣',
':seven:': u'7️⃣',
':eight:': u'8️⃣',
':nine:': u'9️⃣',
':zero:': u'0️⃣',
':keycap_ten:': u'🔟',
':1234:': u'🔢',
':hash:': u'#️⃣',
':symbols:': u'🔣',
':arrow_up:': u'⬆️',
':arrow_down:': u'⬇️',
':arrow_left:': u'⬅️',
':arrow_right:': u'➡️',
':capital_abcd:': u'🔠',
':abcd:': u'🔡',
':abc:': u'🔤',
':arrow_upper_right:': u'↗️',
':arrow_upper_left:': u'↖️',
':arrow_lower_right:': u'↘️',
':arrow_lower_left:': u'↙️',
':left_right_arrow:': u'↔️',
':arrow_up_down:': u'↕️',
':arrows_counterclockwise:': u'🔄',
':arrow_backward:': u'◀️',
':arrow_forward:': u'▶️',
':arrow_up_small:': u'🔼',
':arrow_down_small:': u'🔽',
':leftwards_arrow_with_hook:': u'↩️',
':arrow_right_hook:': u'↪️',
':information_source:': u'ℹ️',
':rewind:': u'⏪',
':fast_forward:': u'⏩',
':arrow_double_up:': u'⏫',
':arrow_double_down:': u'⏬',
':arrow_heading_down:': u'⤵️',
':arrow_heading_up:': u'⤴️',
':ok:': u'🆗',
':twisted_rightwards_arrows:': u'🔀',
':repeat:': u'🔁',
':repeat_one:': u'🔂',
':new:': u'🆕',
':up:': u'🆙',
':cool:': u'🆒',
':free:': u'🆓',
':ng:': u'🆖',
':signal_strength:': u'📶',
':cinema:': u'🎦',
':koko:': u'🈁',
':u6307:': u'🈯',
':u7a7a:': u'🈳',
':u6e80:': u'🈵',
':u5408:': u'🈴',
':u7981:': u'🈲',
':ideograph_advantage:': u'🉐',
':u5272:': u'🈹',
':u55b6:': u'🈺',
':u6709:': u'🈶',
':u7121:': u'🈚',
':restroom:': u'🚻',
':mens:': u'🚹',
':womens:': u'🚺',
':baby_symbol:': u'🚼',
':wc:': u'🚾',
':potable_water:': u'🚰',
':put_litter_in_its_place:': u'🚮',
':parking:': u'🅿️',
':wheelchair:': u'♿',
':no_smoking:': u'🚭',
':u6708:': u'🈷️',
':u7533:': u'🈸',
':sa:': u'🈂️',
':m:': u'Ⓜ️',
':passport_control:': u'🛂',
':baggage_claim:': u'🛄',
':left_luggage:': u'🛅',
':customs:': u'🛃',
':accept:': u'🉑',
':secret:': u'㊙️',
':congratulations:': u'㊗️',
':cl:': u'🆑',
':sos:': u'🆘',
':id:': u'🆔',
':no_entry_sign:': u'🚫',
':underage:': u'🔞',
':no_mobile_phones:': u'📵',
':do_not_litter:': u'🚯',
':non-potable_water:': u'🚱',
':no_bicycles:': u'🚳',
':no_pedestrians:': u'🚷',
':children_crossing:': u'🚸',
':no_entry:': u'⛔',
':eight_spoked_asterisk:': u'✳️',
':sparkle:': u'❇️',
':negative_squared_cross_mark:': u'❎',
':white_check_mark:': u'✅',
':eight_pointed_black_star:': u'✴️',
':heart_decoration:': u'💟',
':vs:': u'🆚',
':vibration_mode:': u'📳',
':mobile_phone_off:': u'📴',
':a:': u'🅰️',
':b:': u'🅱️',
':ab:': u'🆎',
':o2:': u'🅾️',
':diamond_shape_with_a_dot_inside:': u'💠',
':loop:': u'➿',
':recycle:': u'♻️',
':aries:': u'♈',
':taurus:': u'♉',
':gemini:': u'♊',
':cancer:': u'♋',
':leo:': u'♌',
':virgo:': u'♍',
':libra:': u'♎',
':scorpius:': u'♏',
':sagittarius:': u'♐',
':capricorn:': u'♑',
':aquarius:': u'♒',
':pisces:': u'♓',
':ophiuchus:': u'⛎',
':six_pointed_star:': u'🔯',
':atm:': u'🏧',
':chart:': u'💹',
':heavy_dollar_sign:': u'💲',
':currency_exchange:': u'💱',
':copyright:': u'©️',
':registered:': u'®️',
':tm:': u'™️',
':x:': u'❌',
':bangbang:': u'‼️',
':interrobang:': u'⁉️',
':exclamation:': u'❗',
':heavy_exclamation_mark:': u'❗',
':question:': u'❓',
':grey_exclamation:': u'❕',
':grey_question:': u'❔',
':o:': u'⭕',
':top:': u'🔝',
':end:': u'🔚',
':back:': u'🔙',
':on:': u'🔛',
':soon:': u'🔜',
':arrows_clockwise:': u'🔃',
':clock12:': u'🕛',
':clock1230:': u'🕧',
':clock1:': u'🕐',
':clock130:': u'🕜',
':clock2:': u'🕑',
':clock230:': u'🕝',
':clock3:': u'🕒',
':clock330:': u'🕞',
':clock4:': u'🕓',
':clock430:': u'🕟',
':clock5:': u'🕔',
':clock530:': u'🕠',
':clock6:': u'🕕',
':clock7:': u'🕖',
':clock8:': u'🕗',
':clock9:': u'🕘',
':clock10:': u'🕙',
':clock11:': u'🕚',
':clock630:': u'🕡',
':clock730:': u'🕢',
':clock830:': u'🕣',
':clock930:': u'🕤',
':clock1030:': u'🕥',
':clock1130:': u'🕦',
':heavy_multiplication_x:': u'✖️',
':heavy_plus_sign:': u'➕',
':heavy_minus_sign:': u'➖',
':heavy_division_sign:': u'➗',
':spades:': u'♠️',
':hearts:': u'♥️',
':clubs:': u'♣️',
':diamonds:': u'♦️',
':white_flower:': u'💮',
':100:': u'💯',
':heavy_check_mark:': u'✔️',
':ballot_box_with_check:': u'☑️',
':radio_button:': u'🔘',
':link:': u'🔗',
':curly_loop:': u'➰',
':wavy_dash:': u'〰️',
':part_alternation_mark:': u'〽️',
':trident:': u'🔱',
':black_medium_square:': u'◼️',
':white_medium_square:': u'◻️',
':black_medium_small_square:': u'◾',
':white_medium_small_square:': u'◽',
':black_small_square:': u'▪️',
':white_small_square:': u'▫️',
':small_red_triangle:': u'🔺',
':black_square_button:': u'🔲',
':white_square_button:': u'🔳',
':black_circle:': u'⚫',
':white_circle:': u'⚪',
':red_circle:': u'🔴',
':large_blue_circle:': u'🔵',
':small_red_triangle_down:': u'🔻',
':white_large_square:': u'⬜',
':black_large_square:': u'⬛',
':large_orange_diamond:': u'🔶',
':large_blue_diamond:': u'🔷',
':small_orange_diamond:': u'🔸',
':small_blue_diamond:': u'🔹',
}
|
lord63/pyemojify
|
pyemojify/emoji.py
|
Python
|
mit
| 24,661
|
[
"Octopus"
] |
7e93dbe5033a136727027c2f3b808035b5bf64d40b1310944a69244ac00b42b8
|
# Import smorgasbord
import os
import sys
sys.path.insert(0, '../')
import gc
import pdb
import time
import math
import random
import re
import copy
import warnings
import multiprocessing as mp
import numpy as np
import scipy.optimize
import scipy.ndimage.measurements
import scipy.stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import astropy.io.fits
import astropy.wcs
import astropy.convolution
import astropy.modeling
import skimage.restoration
import lmfit
import skimage
import ChrisFuncs
import ChrisFuncs.Photom
import ChrisFuncs.FromGitHub
import CAAPR
plt.ioff()
# The photometry sub-pipeline
def SubpipelinePhotom(source_dict, band_dict, kwargs_dict):
    """
    Run the photometry sub-pipeline for one source in one band.

    Sequence: locate target file, build the pod, preliminary map processing,
    aperture read-in & beam adjustment, optional exclusion, optional star
    removal, polynomial sky subtraction, aperture photometry, aperture-noise
    estimation (with extrapolation fallback), calibration uncertainty,
    aperture & extinction corrections, and optional thumbnail output.

    Arguments:
        source_dict: Dictionary of current source properties ('name' used here).
        band_dict: Dictionary of current band properties ('band_name',
            'downsample_factor', 'calib_error' used here).
        kwargs_dict: Dictionary of pipeline options ('verbose', 'polysub',
            'thumbnails', 'temp_dir_path' used here).

    Returns:
        Dictionary with keys 'band_name', 'ap_sum', 'ap_error'; the flux and
        uncertainty are NaN if the map is missing, or the target lies outside
        it, or photometry fails.
    """
    source_id = source_dict['name']+'_'+band_dict['band_name']

    # Carry out small random wait, to stop RAM checks from syncing up
    time.sleep(5.0*np.random.rand())

    # Perform initial checks of target file type and location; return null photometry if not present
    in_fitspath, file_found = CAAPR.CAAPR_Pipeline.FilePrelim(source_dict, band_dict, kwargs_dict)
    if file_found==False:
        return {'band_name':band_dict['band_name'], 'ap_sum':np.nan, 'ap_error':np.nan}

    # Create the pod (Photometry Organisation Dictionary), which will read in the FITS file, and bundle all the photometry data for this source & band into one dictionary to be passed between functions
    pod = CAAPR.CAAPR_Pipeline.PodInitiate(in_fitspath, source_dict, band_dict, kwargs_dict)

    # Run pod through preliminary processing, to determine initial quantities; if target not within bounds of map, end processing here
    pod = CAAPR.CAAPR_Pipeline.MapPrelim(pod, source_dict, band_dict)
    if pod['within_bounds']==False:
        return {'band_name':band_dict['band_name'], 'ap_sum':np.nan, 'ap_error':np.nan}
    CAAPR.CAAPR_IO.MemCheck(pod)

    # Read in aperture file, extract aperture for current source, and apply aperture corrections for beam size
    pod = AperturePrelim(pod, source_dict, band_dict, kwargs_dict)

    # Check if this band is to be excluded from aperture-fitting; if so, return null aperture information
    pod = ExcludePhotom(pod, source_dict, band_dict, kwargs_dict)
    if pod['band_exclude']==True:
        return pod['null_output_dict']

    # If star-removal is required, run pod through AstroMagic
    pod = CAAPR.CAAPR_AstroMagic.Magic(pod, source_dict, band_dict, kwargs_dict)

    # Remove large-scale sky using a 2-dimensional polynomial filter, with source aperture masked
    pod = CAAPR.CAAPR_Pipeline.PolySub(pod, pod['adj_semimaj_pix'], pod['adj_axial_ratio'], pod['adj_angle'], instant_quit=max([not kwargs_dict['polysub'],pod['band_exclude']]))

    # Run pod through function that (finally) performs the actual photometry
    pod = Photom(pod, band_dict)

    # If photometry recorded null flux, skip determination of aperture noise, recording null value for this too, instead
    if np.isnan(pod['ap_sum']):
        pod['ap_error'] = np.nan
    else:
        CAAPR.CAAPR_IO.MemCheck(pod)

        # Attempt to determine aperture noise the preferred way, using full-size randomly-placed apertures
        if kwargs_dict['verbose']: print('['+source_id+'] Estimating aperture noise using full-size randomly-placed sky apertures.')
        ap_noise_dict = ApNoise(pod['cutout'].copy(), source_dict, band_dict, kwargs_dict, pod['adj_semimaj_pix'], pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'], downsample=int(band_dict['downsample_factor']))
        if ap_noise_dict['fail']==False:
            if kwargs_dict['verbose']: print('['+source_id+'] Aperture noise successfully estimated using full-size randomly-placed sky apertures.')
            pod['ap_noise'] = ap_noise_dict['ap_noise']

        # If full-size apertures were unable to produce a valid aperture noise estimate, commence aperture extrapolation
        else:
            sky_success_counter = ap_noise_dict['sky_success_counter']
            if kwargs_dict['verbose']: print('['+source_id+'] Unable to estimate aperture noise using full-size randomly-placed sky apertures (only '+str(int(sky_success_counter))+' could be placed); switching to aperture extrapolation.')
            CAAPR.CAAPR_IO.MemCheck(pod)
            ap_noise_dict = ApNoiseExtrap(pod['cutout'].copy(), source_dict, band_dict, kwargs_dict, pod['adj_semimaj_pix'], pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'])
            pod['ap_noise'] = ap_noise_dict['ap_noise']

        # If even aperture extrapolation is unable to produce an aperture noise estimate, report null value
        if ap_noise_dict['fail']==True:
            pod['ap_noise'] = np.nan
        else:
            if pod['verbose']: print('['+pod['id']+'] Final aperture noise is '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(pod['ap_noise'],4))+' (in map units).')

        # Calculate calibration uncertainty
        calib_err = abs( pod['ap_sum'] * band_dict['calib_error'] )
        if pod['verbose']: print('['+pod['id']+'] Calibration uncertainty is '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(calib_err,4))+' (in map units).')

        # Calculate final uncertainty; a negative ap_error flags that only the calibration term was available
        pod['ap_error'] = ( pod['ap_noise']**2.0 + calib_err**2.0 )**0.5
        if np.isnan(pod['ap_noise']):
            pod['ap_error'] = -1.0 * pod['ap_sum'] * band_dict['calib_error']

    # Run pod through function that performs aperture correction
    pod = ApCorrect(pod, source_dict, band_dict, kwargs_dict)

    # Use IRSA dust extinction service to correct fluxes for extinction
    pod = ExtCorrrct(pod, source_dict, band_dict, kwargs_dict)

    # If thumbnail images have been requested, save a copy of the current image (ie, with any star and/or background subtraction)
    if kwargs_dict['thumbnails']==True:
        astropy.io.fits.writeto(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_id+'.fits'), pod['cutout'], header=pod['in_header'], overwrite=True)

    # Now return final photometry information to main pipeline, and clean up garbage
    if np.isnan(pod['ap_sum'])==False:
        if pod['verbose']: print('['+pod['id']+'] Final source flux and uncertainty is '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(pod['ap_sum'],4))+' +/- '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(pod['ap_error'],4))+' (in map units).')
    output_dict = {'band_name':band_dict['band_name'], 'ap_sum':pod['ap_sum'], 'ap_error':pod['ap_error']}
    gc.collect()
    del pod
    return output_dict
# Define function that reads in aperture file, identifies aperture for current source, and applies aperture corrections for beam size
def AperturePrelim(pod, source_dict, band_dict, kwargs_dict):
    """
    Read the aperture table, extract the aperture for the current source,
    and enlarge it to account for the beam size of the current band.

    The semi-axes are adjusted by adding the beam width in quadrature to the
    aperture diameter (ie, approximating convolution with the beam).

    Arguments:
        pod: Photometry Organisation Dictionary; must contain 'pix_arcsec'
            (pixel width in arcsec).
        source_dict: Dictionary of current source properties; 'name' is used
            to look up the source in the aperture table.
        band_dict: Dictionary of current band properties; 'beam_arcsec' gives
            the beam width used to adjust the aperture.
        kwargs_dict: Dictionary of pipeline options; 'aperture_table_path'
            locates the CSV aperture table (columns: name, semimaj_arcsec,
            axial_ratio, pos_angle).

    Returns:
        The pod, with keys 'adj_semimaj_arcsec', 'adj_semimin_arcsec',
        'adj_semimaj_pix', 'adj_semimin_pix', 'adj_axial_ratio', and
        'adj_angle' added.

    Raises:
        Exception: If the aperture table holds more than one entry, or no
            entry at all, for the current source.
    """
    # Read in aperture file
    aperture_table = np.genfromtxt(kwargs_dict['aperture_table_path'], delimiter=',', names=True, dtype=None)
    aperture_index = np.where( aperture_table['name']==source_dict['name'] )
    if aperture_index[0].shape[0]>1:
        raise Exception('Aperture value contains more than one entry for current galaxy')
    elif aperture_index[0].shape[0]==0:
        # Previously this fell through to an opaque IndexError; fail with a clear message instead
        raise Exception('Aperture value contains no entry for current galaxy')
    else:
        aperture_index = aperture_index[0][0]

    # Extract aperture corresponding to current source, dealing with special case where aperture file contains only one source (genfromtxt then yields 0-d fields)
    if aperture_table['semimaj_arcsec'].shape==() and np.where( aperture_table['name']==source_dict['name'] )[0][0]==0:
        opt_semimaj_arcsec = aperture_table['semimaj_arcsec']
        opt_axial_ratio = aperture_table['axial_ratio']
        opt_angle = aperture_table['pos_angle']
    else:
        opt_semimaj_arcsec = aperture_table['semimaj_arcsec'][aperture_index]
        opt_axial_ratio = aperture_table['axial_ratio'][aperture_index]
        opt_angle = aperture_table['pos_angle'][aperture_index]
    opt_semimin_arcsec = opt_semimaj_arcsec / opt_axial_ratio

    # Adjust aperture to account for beam size of current band (beam added in quadrature to the diameter), and record to pod
    adj_semimaj_arcsec = 0.5 * ( (2.0*opt_semimaj_arcsec)**2.0 + band_dict['beam_arcsec']**2.0 )**0.5
    adj_semimin_arcsec = 0.5 * ( (2.0*opt_semimin_arcsec)**2.0 + band_dict['beam_arcsec']**2.0 )**0.5
    adj_axial_ratio = adj_semimaj_arcsec / adj_semimin_arcsec
    adj_angle = opt_angle

    # Save adjusted values to pod
    pod['adj_semimaj_arcsec'] = adj_semimaj_arcsec
    pod['adj_semimin_arcsec'] = adj_semimin_arcsec
    pod['adj_semimaj_pix'] = adj_semimaj_arcsec / pod['pix_arcsec']
    pod['adj_semimin_pix'] = adj_semimin_arcsec / pod['pix_arcsec']
    pod['adj_axial_ratio'] = adj_axial_ratio
    pod['adj_angle'] = adj_angle

    # Return pod
    return pod
# Define function that performs the actual aperture photometry
def Photom(pod, band_dict):
    """
    Measure the source flux for the current source & band.

    Pixels are summed inside the beam-adjusted elliptical aperture; the
    background level comes from a sigma-clipped elliptical annulus and is
    subtracted per aperture pixel. If too many pixels in either region are
    NaN, a null flux is recorded instead.

    Arguments:
        pod: Photometry Organisation Dictionary; must hold the cutout map,
            adjusted aperture geometry, and centre coordinates.
        band_dict: Dictionary of current band properties; 'subpixel_factor',
            'annulus_inner', and 'annulus_outer' are used here.

    Returns:
        The pod, with 'ap_sum' (background-subtracted flux) and 'bg_avg'
        (per-pixel background level) added; both NaN on failure.
    """
    # Nothing to do for bands excluded from photometry
    if pod['band_exclude']==True:
        return pod
    if pod['verbose']: print('['+pod['id']+'] Performing aperture photometry.')

    # Sum pixels inside the source aperture, sub-sampling pixels if requested
    subpix = float(band_dict['subpixel_factor'])
    if subpix==1.0:
        src_calc = ChrisFuncs.Photom.EllipseSum(pod['cutout'], pod['adj_semimaj_pix'], pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'])
    elif subpix>1.0:
        src_calc = ChrisFuncs.Photom.EllipseSumUpscale(pod['cutout'], pod['adj_semimaj_pix'], pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'], upscale=band_dict['subpixel_factor'])

    # Likewise sum pixels inside the background annulus
    annulus_inner_pix = pod['adj_semimaj_pix'] * band_dict['annulus_inner']
    annulus_width = (pod['adj_semimaj_pix'] * band_dict['annulus_outer']) - annulus_inner_pix
    if subpix==1.0:
        sky_calc = ChrisFuncs.Photom.AnnulusSum(pod['cutout'], annulus_inner_pix, annulus_width, pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'])
    elif subpix>1.0:
        sky_calc = ChrisFuncs.Photom.AnnulusSumUpscale(pod['cutout'], annulus_inner_pix, annulus_width, pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'], upscale=band_dict['subpixel_factor'])

    # Work out what fraction of the aperture and annulus pixels are NaN
    src_nan_count = np.where(np.isnan(src_calc[2]))[0].shape[0]
    src_nan_frac = 0.0 if src_nan_count==0 else float(src_nan_count) / float(src_calc[1])
    sky_nan_count = np.where(np.isnan(sky_calc[2]))[0].shape[0]
    sky_nan_frac = 0.0 if sky_nan_count==0 else float(sky_nan_count) / float(sky_calc[1])

    # If either NaN fraction exceeds its threshold, record a null flux and bail out
    ap_nan_thresh = 0.10
    bg_nan_thresh = 0.80
    nan_fail = False
    if src_nan_frac>ap_nan_thresh:
        if pod['verbose']: print('['+pod['id']+'] More than '+str(int(100.0*ap_nan_thresh))+'% of pixels in source aperture are NaN; recording null flux.')
        nan_fail = True
    elif sky_nan_frac>bg_nan_thresh:
        if pod['verbose']: print('['+pod['id']+'] More than '+str(int(100.0*bg_nan_thresh))+'% of pixels in background are NaN; recording null flux.')
        nan_fail = True
    if nan_fail:
        pod['ap_sum'] = np.nan
        pod['bg_avg'] = np.nan
        return pod

    # Estimate sigma-clipped background level (scaled to native pixel area), and subtract it from the aperture sum
    sky_clip = ChrisFuncs.SigmaClip(sky_calc[2], median=False, sigma_thresh=3.0)
    sky_avg = sky_clip[1] * subpix**2.0
    source_sum = src_calc[0] - (src_calc[1] * sky_avg)

    # Record results to pod, report, and return
    pod['ap_sum'] = source_sum
    pod['bg_avg'] = sky_avg
    if np.isnan(source_sum):
        if pod['verbose']: print('['+pod['id']+'] Source flux is NaN.')
    else:
        if pod['verbose']: print('['+pod['id']+'] Source flux is '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(source_sum,4))+' (in map units).')
    return pod
# Define function that attempts to estimate aperture noise using randomly-positioned sky apertures of given dimensions
def ApNoise(cutout, source_dict, band_dict, kwargs_dict, adj_semimaj_pix, adj_axial_ratio, adj_angle, centre_i, centre_j, mini=False, downsample=False):
source_id = source_dict['name']+'_'+band_dict['band_name']
ap_debug = False
# Handle downsampling input
if downsample!=False:
ds_request = int(downsample)
else:
ds_request = 1
# Standard downsampling target is for aperture diameter to correspod to 200 pixels
ds_target = int( np.round( float(adj_semimaj_pix) ) / 100.0 )
ds_factor = max([ ds_request, ds_target ])
# If mini-apertures are being used, ensure downsampling isn't agressive (ie, apertures would be sub-Nyquist sampled)
if mini!=False:
ds_mini = int( np.round( float(mini-1.0) ) / 2.355 )
if ds_mini>=2:
ds_factor = min([ ds_factor, ds_mini ])
else:
ds_factor = 1
# If downsampling is makes sense, apply
if ds_factor>=2:
if ap_debug: print 'Setup: Downsampling map by factor of '+str(ds_factor)
cutout = skimage.measure.block_reduce(cutout, block_size=(int(ds_factor),int(ds_factor)), func=np.mean, cval=np.NaN)
cutout *= float(ds_factor)*float(ds_factor)
centre_i /= float(ds_factor)
centre_j /= float(ds_factor)
adj_semimaj_pix /= float(ds_factor)
if mini!=False:
mini /= float(ds_factor)
# Handle input variables if mini-apertures are required
if mini!=False:
if ap_debug: print 'Setup: Preparing inputs for mini-apertures'
if isinstance(mini, float) or isinstance(mini, int):
mini = float(mini)
adj_semimaj_pix_full = adj_semimaj_pix
adj_semimaj_pix = mini
else:
pdb.set_trace()
else:
adj_semimaj_pix_full = adj_semimaj_pix
adj_semimin_pix_full = adj_semimaj_pix_full / adj_axial_ratio
# Define charactaristics of circular aperture with same area as elliptical source aperture
ap_area = np.pi * adj_semimaj_pix * ( adj_semimaj_pix / adj_axial_ratio )
sky_ap_rad_pix = ( ap_area / np.pi )**0.5
sky_border = int( sky_ap_rad_pix + 1.0 )#int( ( band_dict['annulus_outer'] * sky_ap_rad_pix ) + 1 )
adj_semimin_pix = adj_semimaj_pix / adj_axial_ratio
# Creating mask maps to describe no-go regions
if ap_debug: print 'Setup: Creating mask maps'
prior_mask = np.zeros(cutout.shape)
exclude_mask = ChrisFuncs.Photom.EllipseMask(cutout, adj_semimaj_pix_full, adj_axial_ratio, adj_angle, centre_i, centre_j)
flag_mask = np.zeros(cutout.shape)
attempt_mask = np.zeros(cutout.shape)
# Set pixels in source aperture to all have NaN pixels, so they don't get sampled by sky annuli
cutout_inviolate = cutout.copy()
cutout[ np.where(ChrisFuncs.Photom.EllipseMask(cutout, adj_semimaj_pix_full, adj_axial_ratio, adj_angle, centre_i, centre_j)==1) ] = np.NaN
# Define how many random aperture are desired/required/permitted
sky_success_target = 100
sky_success_min = 50
sky_gen_max = 250
# Generate random polar coordinates to draw from
if ap_debug: print 'Setup: Generating pool of random polar coordinates'
random_size = sky_success_target * sky_gen_max * 10
random_failed = []
random_theta_list = 360.0 * np.random.rand(random_size)
random_r_list = adj_semimin_pix + np.abs(np.random.normal(loc=0.0, scale=5.0*adj_semimaj_pix_full, size=random_size))
# Locate contiguous map regions
if ap_debug: print 'Pruning: Locating contiguous coverage regions'
cont_binary = np.zeros(cutout.shape)
cont_binary[ np.where( np.isnan(cutout_inviolate)==False ) ] = 1
cont_structure = np.array([[1,1,1], [1,1,1], [1,1,1]])
cont_label = scipy.ndimage.measurements.label(cont_binary, structure=cont_structure)[0]
cont_search_mask = ChrisFuncs.EllipseMask(cont_label, 3.0, 1.0, 0.0, centre_i, centre_j)
cont_search_values = cont_label[ np.where( cont_search_mask==1 ) ]
# Identify contiguous region associated with target source
if ap_debug: print 'Pruning: Identifying coverage region associated with source'
if np.where(cont_search_values>0)[0].shape[0]==0:
cont_target = 0
else:
cont_target = scipy.stats.mode(cont_search_values[np.where(cont_search_values>0)])[0][0]
cont_where_bad = np.where( cont_label!=cont_target )
# Remove random coordinates that are more distant than most distant part of the coverage region the target source lies in
if ap_debug: print 'Pruning: Removing random coords that definately lie outside coverage region'
cont_size_i, cont_size_j = cutout.shape
cont_range_i = np.arange(cont_size_i) - centre_i
cont_range_j = np.arange(cont_size_j) - centre_j
cont_coord_i, cont_coord_j = np.meshgrid(cont_range_j, cont_range_i) # Yes, i and j are supposed to be this way around inside meshgrid (for some reason)
cont_coord_i[cont_where_bad] = np.NaN
cont_coord_j[cont_where_bad] = np.NaN
cont_dist = np.sqrt(cont_coord_i**2 + cont_coord_j**2)
cont_dist_max = np.nanmax(cont_dist)
random_r_coverage = np.where( random_r_list < (cont_dist_max-sky_border) )
random_r_list = random_r_list[random_r_coverage]
random_theta_list = random_theta_list[random_r_coverage]
# Convert random polar coordinates into cartesian coordinates
if ap_debug: print 'Pruning: Converting random polar coords to cartesian coords, and removing those that lie beyond map border'
random_i_list = centre_i + ( random_r_list * np.cos(np.radians(random_theta_list)) )#np.random.normal(loc=centre_i, scale=2.0*sky_ap_rad_pix)
random_j_list = centre_j + ( random_r_list * np.sin(np.radians(random_theta_list)) )#np.random.normal(loc=centre_j, scale=2.0*sky_ap_rad_pix)
# Remove random coodinates that fall fall beyond border in i-coords
random_not_i_border = np.where( (random_i_list>sky_border) & (random_i_list<(cutout.shape[0]-sky_border)) )
random_i_list = random_i_list[random_not_i_border]
random_j_list = random_j_list[random_not_i_border]
# Remove random coodinates that fall fall beyond border in j-coords
random_not_j_border = np.where( (random_j_list>sky_border) & (random_j_list<(cutout.shape[1]-sky_border)) )
random_i_list = random_i_list[random_not_j_border]
random_j_list = random_j_list[random_not_j_border]
# Remove random coordinates that intersect source
if ap_debug: print 'Pruning: Removing random coords that intersect source'
random_not_source = np.where( np.sqrt( (np.abs(centre_i-random_i_list))**2.0 + (np.abs(centre_j-random_j_list))**2.0 ) > adj_semimin_pix_full )
#random_not_source = np.where( (abs(centre_i-random_i_list)>adj_semimin_pix_full) & (abs(centre_j-random_j_list)>adj_semimin_pix_full) )
random_i_list = random_i_list[random_not_source]
random_j_list = random_j_list[random_not_source]
# Remove random coordinates that correspond to NaN pixels
if ap_debug: print 'Pruning: Removing random coords that correspond to NaN pixels'
random_i_list_pix = np.round(random_i_list).astype(int)
random_j_list_pix = np.round(random_j_list).astype(int)
random_ij_values = cutout[(random_i_list_pix,random_j_list_pix)]
random_ij_pix_good = np.where(np.isnan(random_ij_values)==False)
random_i_list = random_i_list[random_ij_pix_good]
random_j_list = random_j_list[random_ij_pix_good]
# If none of the apertures are suitable, immediately report failure
if random_i_list.shape[0]==0:
if ap_debug: print 'Status: Pruning removed all generated coordinates'
ap_noise_dict = {'fail':True, 'prior_mask':prior_mask, 'flag_mask':flag_mask, 'sky_success_counter':0}
cutout = cutout_inviolate
return ap_noise_dict
# Commence creation of random sky apertures
sky_success_counter = 0
sky_sum_list = []
sky_total_fail = False
while True:
# Repeatedly generate random sky apertures, until an acceptable aperture is generated
sky_gen_counter = 0
sky_gen_fail = False
while True:
sky_gen_counter += 1
# If more than a given number of unsuccessful sky apertures have been generated in a row, call it a day
if sky_gen_counter>sky_gen_max:
sky_gen_fail = True
if ap_debug: print 'Status: Unable to generate suitable random sky aperture after '+str(sky_gen_max)+' attempts'
break
# Select random coordinate set for this iteration; if no un-used random coordinates can be found, reject
#if ap_debug: print 'Checking: Selecting random coordinates'
random_accept = False
random_reject_count = 0
while random_accept==False:
random_index = int( np.floor( np.random.rand() * float(random_i_list.shape[0]) ) )
if random_index not in random_failed:
random_accept = True
else:
random_reject_count += 1
if random_reject_count>(10*random_i_list.shape[0]):
break
if random_reject_count>(10*random_i_list.shape[0]):
if ap_debug: print 'Rejection: Unable to find un-used random coodinates'
continue
random_i = random_i_list[random_index]
random_j = random_j_list[random_index]
if ap_debug:
ap_mask = ChrisFuncs.Photom.EllipseMask(cutout, sky_ap_rad_pix, 1.0, 0.0, random_i, random_j)
attempt_mask[ np.where(ap_mask==1) ] = sky_success_counter
print 'Aperture: '+str(sky_success_counter+1)+'; Generation: '+str(sky_gen_counter)+'; Pix Coords: ['+str(random_i)+','+str(random_j)+']'
# Do sophisticated check that generated sky aperture does not intersect source; if it does, reject
#if ap_debug: print 'Checking: Determining whether aperture intersects source (sophisticated check)'
exclude_sum = ChrisFuncs.Photom.EllipseSum(exclude_mask, sky_ap_rad_pix, 1.0, 0.0, random_i, random_j)[0]
if exclude_sum>0:
if ap_debug: print 'Rejection: Aperture intersects source (according to sophisticated check)'
random_failed.append(random_index)
continue
# Do basic chrck that the majority of the pixels in the generated sky aperture have not already been sampled by previous sky apertures; they have, reject
#if ap_debug: print 'Checking: Determining if aperture over-sampled (basic check)'
prior_calc = ChrisFuncs.Photom.EllipseSum(prior_mask, sky_ap_rad_pix, 1.0, 0.0, random_i, random_j)
prior_calc[2][ np.where(prior_calc[2]>=1.0) ] = 1.0
prior_frac = np.sum(prior_calc[2]) / float(prior_calc[1])
if prior_frac>0.5:
if ap_debug: print 'Rejection: Aperture over-sampled (according to basic check)'
random_failed.append(random_index)
continue
# Do sophisticated check that the majority of the pixels in the generated sky aperture have not already been sampled by previous sky apertures; they have, reject
#if ap_debug: print 'Checking: Determinging if aperture oversampled (sophisticated check)'
ap_mask_check = ChrisFuncs.Photom.EllipseMask(cutout, sky_ap_rad_pix, 1.0, 0.0, random_i, random_j)
flag_mask_check = flag_mask.copy()
flag_mask_check[np.where(ap_mask_check==1)] = int(2.0**(sky_success_counter+1.0))
flag_tallies = np.array([ np.where(flag_mask_check==flag)[0].shape[0] for flag in (2.0**np.arange(0.0,sky_success_counter+2.0)).tolist() ])
flag_check = np.where(flag_tallies<(0.5*ap_area))[0].shape[0]
if flag_check>1:
if ap_debug: print 'Rejection: Aperture over-sampled (according to sophisticated check)'
random_failed.append(random_index)
continue
# Evaluate pixels in sky aperture
#if ap_debug: print 'Checking: Evaluating pixels in sky aperture'
ap_calc = ChrisFuncs.Photom.EllipseSum(cutout, sky_ap_rad_pix, 1.0, 0.0, random_i, random_j)
# Evaluate pixels in sky annulus
#if ap_debug: print 'Checking: Evaluating pixels in sky anmnulus'
bg_inner_semimaj_pix = adj_semimaj_pix * band_dict['annulus_inner']
bg_width = (adj_semimaj_pix * band_dict['annulus_outer']) - bg_inner_semimaj_pix
bg_calc = ChrisFuncs.Photom.AnnulusSum(cutout, bg_inner_semimaj_pix, bg_width, 1.0, 0.0, random_i, random_j)
# Check if more than a given fraction of the pixels inside the source aperture are NaN; if so, reject
if ap_calc[3][0].shape[0]==0 or ap_calc[1]==0:
ap_nan_frac = 0.0
if ap_calc[1]==0:
ap_nan_frac = 1.0
else:
ap_nan_frac = float(ap_calc[3][0].shape[0]) / float(ap_calc[1]+float(ap_calc[3][0].shape[0]))
ap_nan_thresh = 0.10
if ap_nan_frac>ap_nan_thresh:
if ap_debug: print 'Rejection: Aperture contains too many NaNs'
random_failed.append(random_index)
continue
# Check if more than a given fraction of the pixels inside the sky annulus are NaN; if so, reject
if bg_calc[3][0].shape[0]==0:
bg_nan_frac = 0.0
if bg_calc[1]==0:
bg_nan_frac = 1.0
else:
bg_nan_frac = float(bg_calc[3][0].shape[0]) / float(bg_calc[1]+bg_calc[3][0].shape[0])
bg_nan_thresh = 0.80
if bg_nan_frac>bg_nan_thresh:
if ap_debug: print 'Rejection: Annulus contains too many NaNs'
random_failed.append(random_index)
continue
# If coords have not been rejected for any reason, accept them and proceed
else:
sky_success_counter += 1
break
# If no suitable sky aperture could be generated on this iteration, decide how to proceed, based on how many had been successfully generated already
if sky_gen_fail:
if sky_success_counter<sky_success_min:
sky_total_fail = True
break
else:
if ap_debug: print 'Status: However, sufficient number of successful random apertures ('+str(int(sky_success_counter))+') already generated; proceeding'
break
# Calculate actual flux in sky aperture, and record
#if ap_debug: print 'Checking: Performing photometry with random sky aperture and annulus'
bg_clip = ChrisFuncs.SigmaClip(bg_calc[2], median=False, sigma_thresh=3.0)
bg_avg = bg_clip[1]
ap_sum = ap_calc[0] - (ap_calc[1] * bg_avg)
sky_sum_list.append(ap_sum)
if np.isnan(ap_sum):
pdb.set_trace()
# Add this aperture to the prior mask and flag mask
if not ap_debug: ap_mask = ChrisFuncs.Photom.EllipseMask(cutout, sky_ap_rad_pix, 1.0, 0.0, random_i, random_j)
prior_mask += ap_mask
flag_mask[np.where(ap_mask==1)] += 2.0**sky_success_counter
# If target number of sky apertures have been processed, break out of loop
if sky_success_counter>=sky_success_target:
if ap_debug: print 'Status: Target number of successful random apertures ('+str(int(sky_success_target))+') generated; proceeding'
break
# If total failure was encountered, end process and report now
if sky_total_fail:
ap_noise_dict = {'fail':True, 'prior_mask':prior_mask, 'flag_mask':flag_mask, 'sky_success_counter':sky_success_counter}
cutout = cutout_inviolate
return ap_noise_dict
# Otherwise, calculate aperture noise using returned aperture values, and return
else:
sky_sum_list = np.array(sky_sum_list)
ap_noise = ChrisFuncs.SigmaClip(sky_sum_list, tolerance=0.001, median=True, sigma_thresh=3.0)[0]
ap_noise_dict = {'fail':False, 'ap_noise':ap_noise, 'ap_num':sky_success_counter, 'prior_mask':prior_mask, 'flag_mask':flag_mask}
#ChrisFuncs.Cutout(prior_mask, '/home/saruman/spx7cjc/DustPedia/Prior.fits')
if kwargs_dict['verbose']: print '['+source_id+'] Aperture noise from current random apertures is '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(ap_noise,4))+' (in map units).'
cutout = cutout_inviolate
return ap_noise_dict
# Define function that estimates aperture noise for the full-size aperture by extrapolating from mini-apertures
def ApNoiseExtrap(cutout, source_dict, band_dict, kwargs_dict, adj_semimaj_pix, adj_axial_ratio, adj_angle, centre_i, centre_j):
    """
    Estimate the aperture noise of the full-size source aperture by extrapolation.

    Noise is measured (via ApNoise) for a series of smaller 'mini' apertures; a
    straight line is then fitted in log10(aperture area) vs log10(aperture noise)
    space, and evaluated at the area of the true aperture. A diagnostic plot of
    the fit is saved to the temporary directory.

    Returns a dict: {'fail':bool, 'ap_noise':float} (ap_noise is NaN on failure).
    """
    source_id = source_dict['name']+'_'+band_dict['band_name']
    # Define characteristics of circular aperture with same area as elliptical source aperture
    ap_area = np.pi * adj_semimaj_pix * ( adj_semimaj_pix / adj_axial_ratio )
    sky_ap_rad_pix = ( ap_area / np.pi )**0.5
    # Generate list of mini-aperture radii to use (logarithmically spaced, largest first), and declare result lists
    mini_ap_rad_base = 1.2
    mini_ap_rad_pix_input = mini_ap_rad_base**np.arange( 1.0, np.ceil( math.log( sky_ap_rad_pix, mini_ap_rad_base ) ) )[::-1]
    min_ap_rad_pix_output = []
    mini_ap_noise_output = []
    mini_ap_num_output = []
    # Loop over radii for mini-apertures
    for mini_ap_rad_pix in mini_ap_rad_pix_input:
        if kwargs_dict['verbose']: print('['+source_id+'] Finding aperture noise for mini-apertures of radius '+str(mini_ap_rad_pix)[:6]+' pixels.')
        mini_ap_noise_dict = ApNoise(cutout.copy(), source_dict, band_dict, kwargs_dict, adj_semimaj_pix, adj_axial_ratio, adj_angle, centre_i, centre_j, mini=mini_ap_rad_pix, downsample=int(band_dict['downsample_factor']))
        # If mini-aperture succeeded, record and proceed; else, call it a day
        if mini_ap_noise_dict['fail']==False:
            min_ap_rad_pix_output.append(mini_ap_rad_pix)
            mini_ap_noise_output.append(mini_ap_noise_dict['ap_noise'])
            mini_ap_num_output.append(mini_ap_noise_dict['ap_num'])
        elif mini_ap_noise_dict['fail']==True:
            if kwargs_dict['verbose']: print('['+source_id+'] Unable to place sufficient number of mini-apertures at this radius.')
            # On a failure, stop early if plenty of measurements have already been collected
            if len(mini_ap_noise_output)>=10:
                break
    # Convert output lists into arrays
    min_ap_rad_pix_output = np.array(min_ap_rad_pix_output)
    mini_ap_noise_output = np.array(mini_ap_noise_output)
    mini_ap_num_output = np.array(mini_ap_num_output).astype(float)
    # If insufficient points to make extrapolation, report failure; else proceed
    if min_ap_rad_pix_output.shape[0]<2:
        ap_noise_dict = {'fail':True, 'ap_noise':np.nan}
        gc.collect()
        return ap_noise_dict
    else:
        # Calculate log of mini aperture area and noise
        log_mini_ap_area = np.log10(np.pi*min_ap_rad_pix_output**2.0)
        log_mini_ap_noise = np.log10(mini_ap_noise_output)
        # Calculate Poisson uncertainty on calculated noise (relative error = 1/sqrt(N apertures))
        mini_ap_noise_err_rel = mini_ap_num_output**0.5 / mini_ap_num_output
        mini_ap_noise_err = np.abs( mini_ap_noise_output * mini_ap_noise_err_rel )
        # Down-weight points according to distance in log space from true aperture area
        mini_ap_noise_err = mini_ap_noise_err * (1.0 + ( ap_area / 10.0**log_mini_ap_area ) )
        # Translate uncertainties into log space
        log_mini_ap_noise_err = ChrisFuncs.LogError(mini_ap_noise_output, mini_ap_noise_err)
        # Define straight-line function, and fit it to points
        def Line(x,m,c):
            return (m*x)+c
        try:
            line_fit = scipy.optimize.curve_fit(Line, log_mini_ap_area, log_mini_ap_noise, sigma=log_mini_ap_noise_err)
        except Exception:
            ap_noise_dict = {'fail':True, 'ap_noise':np.nan}
            gc.collect()
            return ap_noise_dict
        line_m, line_c = line_fit[0][0], line_fit[0][1]
        # Determine projected aperture noise value at the area of the true aperture
        log_ap_area = np.log10(ap_area)
        log_ap_noise_proj = Line(log_ap_area, line_m, line_c)
        ap_noise_proj = 10.0**(log_ap_noise_proj)
        # Determine axis ranges, and generate points for best-fit line
        ax_y_min = np.floor( np.min([ np.min( log_mini_ap_noise - log_mini_ap_noise_err ), log_ap_noise_proj ]) )
        ax_y_max = np.ceil( np.max([ np.max( log_mini_ap_noise + log_mini_ap_noise_err ), log_ap_noise_proj ]) )
        ax_x_min = np.floor( np.min([ np.min(log_mini_ap_area), log_ap_area ]) )
        # BUGFIX: original took np.max() of the scalar log_ap_area here, ignoring the mini-aperture areas
        ax_x_max = np.ceil( np.max([ np.max(log_mini_ap_area), log_ap_area ]) )
        line_x = np.linspace( ax_x_min, ax_x_max, num=10000 )
        line_y = Line(line_x, line_m, line_c)
        # Set up figure & axes
        fig = plt.figure(figsize=(8,6))
        ax_dims = [0.125, 0.125, 0.825, 0.825]
        ax = fig.add_axes(ax_dims)
        # Plot points and best-fit line
        ax.errorbar(log_mini_ap_area, log_mini_ap_noise, yerr=log_mini_ap_noise_err, ecolor='#4D78C9', elinewidth=1.15, capthick=1.15, marker='x', color='#0080FF', markersize=0.0, markeredgewidth=1.15, linewidth=0)
        ax.scatter(log_mini_ap_area, log_mini_ap_noise, c='#4D78C9', marker='o', s=75, linewidths=0, label='Mini-aperture noise values')
        ax.scatter(np.log10(ap_area), log_ap_noise_proj, c='#C03030', marker='H', s=150, linewidths=0, label='Extrapolated aperture noise')
        ax.plot(line_x, line_y, ls='--', lw=1.0, c='#4D78C9', label='Line of best fit')
        # Format axis limits and labels
        ax.set_xlabel(r'Aperture Area (log$_{10}$ pix)', fontsize=15)
        ax.set_ylabel(r'Aperture Noise (log$_{10}$ map units)', fontsize=15)
        ax.set_xlim(ax_x_min,ax_x_max)
        ax.set_ylim(ax_y_min,ax_y_max)
        for xlabel in ax.get_xticklabels():
            xlabel.set_fontproperties(matplotlib.font_manager.FontProperties(size=15))
        for ylabel in ax.get_yticklabels():
            ylabel.set_fontproperties(matplotlib.font_manager.FontProperties(size=15))
        # Plot legend
        ax_handles, ax_labels = ax.get_legend_handles_labels()
        ax1_lgd = ax.legend(ax_handles, ax_labels, loc='best', scatterpoints=1, labelspacing=0.25, borderpad=0)
        ax1_lgd.draw_frame(False)
        plt.setp(plt.gca().get_legend().get_texts(), fontsize='12.5')
        # Save figure, clean up, and report results
        fig.savefig( os.path.join( kwargs_dict['temp_dir_path'], source_id+'_Aperture_Noise_Projection.png' ), dpi=100 )
        gc.collect()
        fig.clear()
        plt.close('all')
        ap_noise_dict = {'fail':False, 'ap_noise':ap_noise_proj}
        return ap_noise_dict
# Define function that uses provided beam profile to aperture-correct photometry
def ApCorrect(pod, source_dict, band_dict, kwargs_dict):
    """
    Determine and apply an aperture correction factor, accounting for source flux
    scattered out of the aperture by the PSF.

    The source is fitted with a PSF-convolved 2D Sersic profile (via lmfit, using
    the Sersic_LMfit objective); the correction factor is the ratio of aperture
    photometry performed on the unconvolved versus the convolved best-fit model.
    The pod's 'ap_sum' and 'ap_error' entries are scaled by this factor (never
    less than 1), and the pod is returned.
    """
    # If aperture correction not required, immediately return unaltered pod
    if str(band_dict['beam_correction'])=='False':
        return pod
    elif kwargs_dict['verbose']: print('['+pod['id']+'] Determining aperture correction factor due to PSF.')
    # If SNR is <3, do not perform aperture correction, as the profile fit would be unreliable
    snr = pod['ap_sum'] / abs(pod['ap_error'])
    if snr<3.0:
        if kwargs_dict['verbose']: print('['+pod['id']+'] Source SNR<3; not applying aperture correction, as result would be unreliable.')
        return pod
    # If no PSF given, assume Airy disc; else extract PSF from provided file
    if str(band_dict['beam_correction'])=='True':
        psf = astropy.convolution.kernels.AiryDisk2DKernel(pod['beam_pix']).array
    else:
        # Read in PSF, and establish pixel size
        psf_in, psf_header = astropy.io.fits.getdata(band_dict['beam_correction'], header=True)
        psf_wcs = astropy.wcs.WCS(psf_header)
        if psf_wcs.wcs.has_cd():
            psf_cdelt = psf_wcs.wcs.cd.max()
        else:
            psf_cdelt = psf_wcs.wcs.cdelt.max()
        psf_cdelt_arcsec = abs( psf_cdelt * 3600.0 )
        # If PSF pixel size differs from map pixel size by more than 0.1%, rescale PSF accordingly
        if (pod['pix_arcsec']/psf_cdelt_arcsec)>1.001 or (pod['pix_arcsec']/psf_cdelt_arcsec)<0.999:
            zoom_factor = float(psf_cdelt_arcsec) / float(pod['pix_arcsec'])
            psf = scipy.ndimage.zoom(psf_in, (zoom_factor,zoom_factor), mode='nearest')
            # If necessary, rebin the PSF so it has odd dimensions
            if not ( psf.shape[0]%2!=0 and psf.shape[1]%2!=0 ):
                psf = ChrisFuncs.FromGitHub.martynbristow.rebin(psf, tuple(np.array(psf.shape)-1))
        # Else, pixel sizes are already effectively the same, so use the PSF as-is
        # (BUGFIX: original condition '>=0.999 and <=0.001' could never be True,
        # leaving psf undefined whenever the pixel scales already matched)
        else:
            psf = psf_in.copy()
    # Normalise PSF
    psf /= np.nansum(psf)
    # Background-subtract cutout
    cutout = pod['cutout'] - pod['bg_avg']
    # Produce mask for pixels we care about for fitting (ie, are inside photometric aperture and background annulus)
    mask = ChrisFuncs.Photom.EllipseMask(cutout, pod['adj_semimaj_pix'], pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j']) #*band_dict['annulus_outer']
    # Produce guess values for the Sersic fit
    initial_sersic_amplitide = cutout[ int(round(pod['centre_i'])), int(round(pod['centre_j'])) ]
    initial_sersic_r_eff = pod['adj_semimaj_pix'] / 10.0
    initial_sersic_n = 1.0
    initial_sersic_x_0 = int(round(pod['centre_j']))
    initial_sersic_y_0 = int(round(pod['centre_i']))
    initial_sersic_ellip = ( pod['adj_axial_ratio'] - 1.0 ) / pod['adj_axial_ratio']
    initial_sersic_theta = np.deg2rad( pod['adj_angle'] )
    # Produce sersic model from guess parameters, for time trials
    sersic_x, sersic_y = np.meshgrid( np.arange(cutout.shape[1]), np.arange(cutout.shape[0]) )
    sersic_model = astropy.modeling.models.Sersic2D(amplitude=initial_sersic_amplitide, r_eff=initial_sersic_r_eff, n=initial_sersic_n, x_0=initial_sersic_x_0, y_0=initial_sersic_y_0, ellip=initial_sersic_ellip, theta=initial_sersic_theta)
    sersic_map = sersic_model(sersic_x,sersic_y)
    # Make sure that PSF array is smaller than sersic model array (as required for convolution); if not, remove its edges such that it is
    if psf.shape[0]>sersic_map.shape[0] or psf.shape[1]>sersic_map.shape[1]:
        excess = max( psf.shape[0]-sersic_map.shape[0], psf.shape[1]-sersic_map.shape[1] )
        border = max( 2, int( np.round( np.ceil( float(excess) / 2.0 ) - 1.0 ) ) )
        psf = psf[border:,border:]
        psf = psf[:-border,:-border]
    # Determine whether FFT convolution or direct convolution is faster for this kernel
    # NOTE: use_fft is currently forced to True, so the timing comparison below is dead code, kept for reference
    time_fft = time.time()
    conv_map = astropy.convolution.convolve_fft(sersic_map, psf, normalize_kernel=True)
    time_fft = time.time() - time_fft
    use_fft = True
    if not use_fft:
        if time_fft<10.0:
            use_fft = True
        else:
            time_direct = time.time()
            conv_map = astropy.convolution.convolve(sersic_map, psf, normalize_kernel=True)
            time_direct = time.time() - time_direct
            if time_fft<time_direct:
                use_fft = True
            else:
                use_fft = False
    # Set up parameters to fit galaxy with 2-dimensional sersic profile
    params = lmfit.Parameters()
    params.add('sersic_amplitide', value=initial_sersic_amplitide, vary=True)
    params.add('sersic_r_eff', value=initial_sersic_r_eff, vary=True, min=0.0, max=pod['adj_semimaj_pix'])
    params.add('sersic_n', value=initial_sersic_n, vary=True, min=0.1, max=10)
    params.add('sersic_x_0', value=initial_sersic_x_0, vary=False)
    params.add('sersic_y_0', value=initial_sersic_y_0, vary=False)
    params.add('sersic_ellip', value=initial_sersic_ellip, vary=True, min=0.5*initial_sersic_ellip, max=0.5*(1.0-initial_sersic_ellip)+initial_sersic_ellip)
    params.add('sersic_theta', value=initial_sersic_theta, vary=False)
    # Solve with LMfit to find parameters of best-fit sersic profile
    result = lmfit.minimize(Sersic_LMfit, params, args=(pod, cutout, psf, mask, use_fft), method='leastsq', ftol=1E-5, xtol=1E-5, maxfev=200)
    # Extract best-fit results
    sersic_amplitide = result.params['sersic_amplitide'].value
    sersic_r_eff = result.params['sersic_r_eff'].value
    sersic_n = result.params['sersic_n'].value
    sersic_x_0 = result.params['sersic_x_0'].value
    sersic_y_0 = result.params['sersic_y_0'].value
    sersic_ellip = result.params['sersic_ellip'].value
    sersic_theta = result.params['sersic_theta'].value
    # Construct underlying sersic map and convolved sersic map, using best-fit parameters
    sersic_model = astropy.modeling.models.Sersic2D(amplitude=sersic_amplitide, r_eff=sersic_r_eff, n=sersic_n, x_0=sersic_x_0, y_0=sersic_y_0, ellip=sersic_ellip, theta=sersic_theta)
    sersic_map = sersic_model(sersic_x, sersic_y)
    if use_fft==True:
        conv_map = astropy.convolution.convolve_fft(sersic_map, psf, normalize_kernel=True)
    elif use_fft==False:
        conv_map = astropy.convolution.convolve(sersic_map, psf, normalize_kernel=True)
    # Determine annulus properties before proceeding with photometry
    bg_inner_semimaj_pix = pod['adj_semimaj_pix'] * band_dict['annulus_inner']
    bg_width = (pod['adj_semimaj_pix'] * band_dict['annulus_outer']) - bg_inner_semimaj_pix
    # Evaluate pixels in source aperture and background annulus of unconvolved sersic map
    if float(band_dict['subpixel_factor'])==1.0:
        sersic_ap_calc = ChrisFuncs.Photom.EllipseSum(sersic_map, pod['adj_semimaj_pix'], pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'])
        sersic_bg_calc = ChrisFuncs.Photom.AnnulusSum(sersic_map, bg_inner_semimaj_pix, bg_width, pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'])
    elif float(band_dict['subpixel_factor'])>1.0:
        sersic_ap_calc = ChrisFuncs.Photom.EllipseSumUpscale(sersic_map, pod['adj_semimaj_pix'], pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'], upscale=band_dict['subpixel_factor'])
        sersic_bg_calc = ChrisFuncs.Photom.AnnulusSumUpscale(sersic_map, bg_inner_semimaj_pix, bg_width, pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'], upscale=band_dict['subpixel_factor'])
    # Background-subtract and measure unconvolved sersic source flux
    sersic_bg_clip = ChrisFuncs.SigmaClip(sersic_bg_calc[2], median=False, sigma_thresh=3.0)
    sersic_bg_avg = sersic_bg_clip[1] * float(band_dict['subpixel_factor'])**2.0
    sersic_ap_sum = sersic_ap_calc[0] - (sersic_ap_calc[1] * sersic_bg_avg)
    # Evaluate pixels in source aperture and background annulus in convolved sersic map
    if float(band_dict['subpixel_factor'])==1.0:
        conv_ap_calc = ChrisFuncs.Photom.EllipseSum(conv_map, pod['adj_semimaj_pix'], pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'])
        conv_bg_calc = ChrisFuncs.Photom.AnnulusSum(conv_map, bg_inner_semimaj_pix, bg_width, pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'])
    elif float(band_dict['subpixel_factor'])>1.0:
        conv_ap_calc = ChrisFuncs.Photom.EllipseSumUpscale(conv_map, pod['adj_semimaj_pix'], pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'], upscale=band_dict['subpixel_factor'])
        conv_bg_calc = ChrisFuncs.Photom.AnnulusSumUpscale(conv_map, bg_inner_semimaj_pix, bg_width, pod['adj_axial_ratio'], pod['adj_angle'], pod['centre_i'], pod['centre_j'], upscale=band_dict['subpixel_factor'])
    # Background-subtract and measure convolved sersic source flux
    conv_bg_clip = ChrisFuncs.SigmaClip(conv_bg_calc[2], median=False, sigma_thresh=3.0)
    conv_bg_avg = conv_bg_clip[1] * float(band_dict['subpixel_factor'])**2.0
    conv_ap_sum = conv_ap_calc[0] - (conv_ap_calc[1] * conv_bg_avg)
    # Correction factor is ratio of unconvolved to convolved model flux; never allow a correction < 1
    ap_correction = np.nanmax([ 1.0, (sersic_ap_sum/conv_ap_sum) ])
    # Apply aperture correction to pod, and return
    if kwargs_dict['verbose']: print('['+pod['id']+'] Applying aperture correction factor of '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(ap_correction,5))+'.')
    pod['ap_sum'] *= ap_correction
    pod['ap_error'] *= ap_correction
    return pod
# Define LMfit objective function: residuals between cutout and PSF-convolved Sersic model
def Sersic_LMfit(params, pod, cutout, psf, mask, use_fft, lmfit=True):
    """
    Objective function for the LMfit Sersic fit performed in ApCorrect.

    params: lmfit.Parameters object holding the current Sersic parameter values
    pod: photometry pod dict (unused here, but passed through by lmfit.minimize)
    cutout: background-subtracted map being fitted
    psf: normalised PSF kernel to convolve the model with
    mask: aperture mask; zero-valued pixels are excluded from the fit
          (NOTE: mutated in place - zeros are set to NaN)
    use_fft: if True, convolve via FFT; if False, convolve directly
    lmfit: if True, return squared residuals (as lmfit.minimize expects);
           if False, return (filtered residuals, full residual map).
           NOTE: this keyword shadows the imported lmfit module inside this function.
    """
    # Extract variable parameters
    sersic_amplitide = params['sersic_amplitide'].value
    sersic_r_eff = params['sersic_r_eff'].value
    sersic_n = params['sersic_n'].value
    sersic_x_0 = params['sersic_x_0'].value
    sersic_y_0 = params['sersic_y_0'].value
    sersic_ellip = params['sersic_ellip'].value
    sersic_theta = params['sersic_theta'].value
    # Generate sersic model given current parameters
    sersic_x, sersic_y = np.meshgrid( np.arange(cutout.shape[1]), np.arange(cutout.shape[0]) )
    sersic_model = astropy.modeling.models.Sersic2D(amplitude=sersic_amplitide, r_eff=sersic_r_eff, n=sersic_n, x_0=sersic_x_0, y_0=sersic_y_0, ellip=sersic_ellip, theta=sersic_theta)
    sersic_map = sersic_model( sersic_x, sersic_y )
    # Convolve sersic model with PSF
    if use_fft==True:
        conv_map = astropy.convolution.convolve_fft(sersic_map, psf, normalize_kernel=True)
    elif use_fft==False:
        conv_map = astropy.convolution.convolve(sersic_map, psf, normalize_kernel=True)
    # Calculate residuals, filtered by mask (out-of-mask pixels become NaN, then get dropped)
    residuals = cutout - conv_map
    mask[ np.where(mask==0.0) ] = np.nan
    residuals *= mask
    # BUGFIX: ndarray.flatten() returns a copy, so the original bare
    # 'residuals.flatten()' statement discarded its result and was a no-op
    residuals = residuals.flatten()
    residuals = residuals[ np.where( np.isnan(residuals)==False ) ]
    # Return residuals
    if lmfit==True:
        return residuals**2.0
    elif lmfit==False:
        return residuals, cutout-conv_map
# Define function that performs extinction correction on photometry, via ChrisFuncs function that calls IRSA dust extinction service (which uses the Schlafly & Finkbeiner 2011 prescription)
def ExtCorrrct(pod, source_dict, band_dict, kwargs_dict):
    """
    Correct the pod's photometry ('ap_sum' and 'ap_error') for Galactic extinction,
    using the correction factor returned by ChrisFuncs.ExtCorrrct (IRSA dust service),
    and return the pod. If the lookup fails and debug mode is off, a NaN factor is
    applied (propagating NaN into the photometry).
    """
    # Run source details through function
    try:
        irsa_band_excorr = ChrisFuncs.ExtCorrrct(source_dict['ra'], source_dict['dec'], band_dict['band_name'], verbose=kwargs_dict['verbose'], verbose_prefix='['+pod['id']+'] ')
    except Exception:
        if kwargs_dict['debug']==True:
            if kwargs_dict['verbose']: print('['+pod['id']+'] Extinction correction failed; entering debug mode.')
            pdb.set_trace()
        else:
            irsa_band_excorr = np.nan
    # Update photometry with extinction corrections
    pod['ap_sum'] *= irsa_band_excorr
    pod['ap_error'] *= irsa_band_excorr
    # Report and return extinction-corrected photometry
    # (BUGFIX: the Pogson magnitude relation is m = 2.5 log10(flux ratio); original used 2.51)
    irsa_band_excorr_mag = 2.5*np.log10(irsa_band_excorr)
    if kwargs_dict['verbose']: print('['+pod['id']+'] Applying Galactic extinction correction of '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(irsa_band_excorr_mag,4))+' mag (ie, factor of '+str(ChrisFuncs.FromGitHub.randlet.ToPrecision(irsa_band_excorr,4))+').')
    return pod
# Define function that checks whether the present band is to be excluded from photometry
def ExcludePhotom(pod, source_dict, band_dict, kwargs_dict):
    """
    Check whether the current band appears in the source's photometry exclusion
    list. Sets pod['band_exclude'] accordingly; for excluded bands, also attaches
    pod['null_output_dict'] (a photometry output entry with NaN values). Returns
    the pod in either case.
    """
    # Python 2/3 compatibility shim for string type checking
    try:
        string_types = basestring
    except NameError:
        string_types = str
    # If the photometry exclusion field contains characters, split it into a list of band names; otherwise use an empty list
    if isinstance(source_dict['photom_bands_exclude'], string_types):
        aperture_bands_exclude = source_dict['photom_bands_exclude'].split(';')
    else:
        # BUGFIX: originally only an explicit ==False value reached this branch, so any
        # other non-string value (eg, NaN, None) left aperture_bands_exclude undefined
        aperture_bands_exclude = []
    # If present band is to be excluded, note this fact in pod
    if band_dict['band_name'] in aperture_bands_exclude:
        pod['band_exclude'] = True
    # If exclusion not required, record and return
    else:
        pod['band_exclude'] = False
        return pod
    # Create photometry output dictionary containing null values
    output_dict = {'band_name':band_dict['band_name'], 'ap_sum':np.nan, 'ap_error':np.nan}
    pod['null_output_dict'] = output_dict
    # Return pod
    if pod['verbose']: print('['+pod['id']+'] No photometry required from this source in this band.')
    return pod
# Define function that verifies every requested band has returned a photometry result
def PhotomCheck(photom_attempts, photom_output_list, source_dict, bands_dict, kwargs_dict):
    """
    Compare the number of photometry outputs against the number of requested bands.

    Returns a (photom_attempts, photom_output_list) tuple; photom_attempts becomes
    the string 'Complete' once photometry is finished (or has been abandoned after
    repeated failures, in which case missing bands are padded with NaN entries).
    """
    photom_limit = 3
    # Success: exactly one output entry per requested band
    if len(photom_output_list) == len(bands_dict.keys()):
        return 'Complete', photom_output_list
    # Otherwise register another failed attempt, pausing before any retry
    photom_attempts += 1
    time.sleep(30.0)
    # Still below the retry limit; hand back unchanged outputs for another pass
    if photom_attempts < photom_limit:
        return photom_attempts, photom_output_list
    print('['+source_dict['name']+'] Photometry failed '+str(photom_limit)+' times in succession; suggest debugging.')
    if kwargs_dict['debug']:
        pdb.set_trace()
    else:
        # Pad the output list with null entries for every band that never reported back
        padded_outputs = copy.deepcopy(photom_output_list)
        reported = [entry['band_name'] for entry in photom_output_list]
        requested = [bands_dict[band]['band_name'] for band in bands_dict.keys()]
        for band_name in requested:
            if band_name not in reported:
                padded_outputs.append({'band_name': band_name, 'ap_sum': np.nan, 'ap_error': np.nan})
        return 'Complete', padded_outputs
# Define function that handles bands excluded from photometry, so that they appear in thumbnail grid
def ExcludedThumb(source_dict, bands_dict, kwargs_dict):
    """
    Prepare thumbnail-grid data for bands excluded from photometry, so they still
    appear in the output thumbnail grid. Records map-wide maxima (beam size, outer
    annulus, pixel size) in source_dict, then runs ExcludedSubpipelinePhotom for
    each excluded band, in parallel if so configured. Returns None.
    """
    # If thumbnails not required, end immediately
    if kwargs_dict['thumbnails']==False:
        return
    # Check if the photometry exclusion field for this source actually contains characters; if so make list of entries, else produce empty list
    if isinstance(source_dict['photom_bands_exclude'], basestring):
        photom_bands_exclude = source_dict['photom_bands_exclude'].split(';')
    else:
        photom_bands_exclude = []
    # Now consider bands which have been assigned a blanket photometry exclusion
    # (de-duplicate, then keep only exclusions that name a band actually present in bands_dict)
    photom_bands_exclude = list( set( photom_bands_exclude ) )
    photom_bands_exclude = np.array(photom_bands_exclude)[ np.in1d( photom_bands_exclude, bands_dict.keys() ) ]
    # If no bands require processing here, end immediately; else prepare to loop over bands that do require processing
    if len(photom_bands_exclude)==0:
        return
    else:
        if kwargs_dict['verbose']: print '['+source_dict['name']+'] Preparing thumbnail data for bands excluded from photometry.'
        random.shuffle(photom_bands_exclude)
        # Find largest beam size, outer annulus size, and pixel size, for later use in thumbnail generation
        beam_arcsec_max = 0.0
        outer_annulus_max = 0.0
        pix_arcsec_max = 0.0
        for band_name in bands_dict:
            band_fitspath, band_file_found = CAAPR.CAAPR_Pipeline.FilePrelim(source_dict, bands_dict[band_name], kwargs_dict)
            # Skip bands whose map file could not be located
            if not band_file_found:#os.path.exists(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_dict['name']+'_'+band_name+'.fits')):
                continue
            band_pix_matrix = astropy.wcs.WCS(astropy.io.fits.getheader(band_fitspath)).pixel_scale_matrix
            # NOTE(review): this combines the min and max |elements| of the pixel scale
            # matrix in quadrature; presumably intended as a conservative pixel-size
            # estimate for rotated WCS matrices - confirm
            band_pix_arcsec = 3600.0 * np.sqrt( np.min(np.abs(band_pix_matrix))**2.0 + np.max(np.abs(band_pix_matrix))**2.0 )
            if band_pix_arcsec>pix_arcsec_max:
                pix_arcsec_max = band_pix_arcsec
            if bands_dict[band_name]['beam_arcsec']>beam_arcsec_max:
                beam_arcsec_max = bands_dict[band_name]['beam_arcsec']
            if bands_dict[band_name]['annulus_outer']>outer_annulus_max:
                outer_annulus_max = bands_dict[band_name]['annulus_outer']
        # Record maxima in source_dict, for use by ExcludedSubpipelinePhotom
        source_dict['beam_arcsec_max'] = beam_arcsec_max
        source_dict['outer_annulus_max'] = outer_annulus_max
        source_dict['pix_arcsec_max'] = pix_arcsec_max
        # In standard operation, process multiple sources in parallel
        if kwargs_dict['parallel']==True:
            ex_ap_pool = mp.Pool(processes=kwargs_dict['n_proc'])
            for band in photom_bands_exclude:
                ex_ap_pool.apply_async( ExcludedSubpipelinePhotom, args=(source_dict, bands_dict[band], kwargs_dict,) )
            ex_ap_pool.close()
            ex_ap_pool.join()
            del(ex_ap_pool)
        # If parallelisation is disabled, process sources one-at-a-time
        elif kwargs_dict['parallel']==False:
            for band in photom_bands_exclude:
                ExcludedSubpipelinePhotom(source_dict, bands_dict[band], kwargs_dict)
# Define 'pseudo-dummy' version of the photometry sub-pipeline, to run excluded bands through
def ExcludedSubpipelinePhotom(source_dict, band_dict, kwargs_dict_inviolate):
    """
    Stripped-down version of the photometry sub-pipeline, run on bands excluded
    from photometry so that a processed thumbnail cutout is still produced for
    them. Writes the resulting cutout to the 'Processed_Maps' temp directory;
    returns None (early return if the map is missing or out of bounds).
    """
    source_id = source_dict['name']+'_'+band_dict['band_name']
    # Make deep copy of kwargs dict, to disable verbosity
    kwargs_dict = copy.deepcopy(kwargs_dict_inviolate)
    kwargs_dict['verbose'] = False
    # Run through initial stages of aperture sub-pipeline, as would occur usually
    in_fitspath_prelim, file_found = CAAPR.CAAPR_Pipeline.FilePrelim(source_dict, band_dict, kwargs_dict)
    # Bail out if the input map could not be found
    if file_found == False:
        return
    pod = CAAPR.CAAPR_Pipeline.PodInitiate(in_fitspath_prelim, source_dict, band_dict, kwargs_dict)
    pod = CAAPR.CAAPR_Pipeline.MapPrelim(pod, source_dict, band_dict)
    # Bail out if the source does not lie within the map bounds
    if pod['within_bounds']==False:
        return
    pod = AperturePrelim(pod, source_dict, band_dict, kwargs_dict)
    pod = CAAPR.CAAPR_Pipeline.PolySub(pod, pod['adj_semimaj_pix'], pod['adj_axial_ratio'], pod['adj_angle'], instant_quit=(not kwargs_dict['polysub']))
    CAAPR.CAAPR_IO.MemCheck(pod)
    # Use thumbnail cutout function to create a cutout that's only as large as it needs to be for the thumbnail grid
    # NOTE(review): assumes adj_semimaj_arcsec >= beam_arcsec, otherwise the sqrt argument goes negative - confirm upstream guarantees this
    semimaj_arcsec = np.sqrt( pod['adj_semimaj_arcsec']**2.0 - band_dict['beam_arcsec']**2.0 )
    thumb_rad_arcsec = np.ceil( 1.0 * source_dict['pix_arcsec_max'] ) + np.ceil( 1.75 * 0.5 * np.sqrt( (source_dict['outer_annulus_max']*2.0*semimaj_arcsec)**2.0 + (source_dict['beam_arcsec_max'])**2.0 ) )
    source_dict['thumb_rad_arcsec'] = thumb_rad_arcsec
    CAAPR.CAAPR_IO.ThumbCutout(source_dict, band_dict, kwargs_dict, pod['in_fitspath'], thumb_rad_arcsec)
    # Rename thumbnail cutout, and make it the 'active' map by repeating necessary processing
    thumb_output = os.path.join( kwargs_dict['temp_dir_path'], 'Processed_Maps', source_id+'_Thumbnail.fits' )
    pod['in_fitspath'] = thumb_output
    in_fitsdata = astropy.io.fits.open(pod['in_fitspath'])
    pod['in_image'] = in_fitsdata[0].data
    pod['in_header'] = in_fitsdata[0].header
    in_fitsdata.close()
    pod['in_wcs'] = astropy.wcs.WCS(pod['in_header'])
    pod['in_fitspath_size'] = float(os.stat(pod['in_fitspath']).st_size)
    # Re-derive the source centre's pixel coordinates in the new (thumbnail) frame
    thumb_centre_xy = pod['in_wcs'].wcs_world2pix( np.array([[ source_dict['ra'], source_dict['dec'] ]]), 0 )
    pod['centre_i'], pod['centre_j'] = float(thumb_centre_xy[0][1]), float(thumb_centre_xy[0][0])
    # Run thumbnail cutout through AstroMagic (deleting any pre-existing data), save result, and remove temporary files
    if kwargs_dict['starsub']==True:
        pod['cutout'] = pod['in_image'].copy()
        pod['starsub_thumbnail'] = True
        if os.path.exists( os.path.join( kwargs_dict['temp_dir_path'], 'AstroMagic', band_dict['band_name'], source_dict['name']+'_'+band_dict['band_name']+'_StarSub.fits') ):
            os.remove( os.path.join( kwargs_dict['temp_dir_path'], 'AstroMagic', band_dict['band_name'], source_dict['name']+'_'+band_dict['band_name']+'_StarSub.fits') )
        pod = CAAPR.CAAPR_AstroMagic.Magic(pod, source_dict, band_dict, kwargs_dict)
        os.remove(thumb_output)
        magic_output = os.path.join(kwargs_dict['temp_dir_path'], 'AstroMagic', band_dict['band_name'], source_dict['name']+'_'+band_dict['band_name']+'_StarSub.fits')
        if os.path.exists(magic_output):
            os.remove(magic_output)
    else:
        pod['cutout'] = pod['in_image'].copy()
        os.remove(thumb_output)
    # Save resulting cutout
    astropy.io.fits.writeto(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_id+'.fits'), pod['cutout'], header=pod['in_header'], overwrite=True)
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_Photom/CAAPR_Photom.py
|
Python
|
mit
| 58,770
|
[
"Galaxy"
] |
a645da4eebfc17496a239e0d86efc47997aa14aefa9bf70a7e2c2883e43b6ebc
|
#!/usr/bin/env python
import sys
try:
import moose
except ImportError:
print "ERROR: Could not import moose. Please add the directory containing moose.py in your PYTHONPATH"
import sys
sys.exit(1)
has_pylab = True
try:
import pylab
from matplotlib import numpy
except ImportError:
print 'Could not import pylab. Will only save ascii files. Install matplotlib to see plots directly.'
RUNTIME = 200.0
SIMDT = 1.0
# Pulse generator with trigger mode = 0 This is free running - and
# creates a series of pulses
pulse0 = moose.PulseGen("/pulse0")
pulse0.firstLevel = 50.0
pulse0.firstWidth = 3.0
pulse0.firstDelay = 5.0
pulse0.secondLevel = -20.0
pulse0.secondWidth = 5.0
pulse0.secondDelay = 8.0
pulse0.baseLevel = 10.0
pulse0.trigMode = 0
trig = moose.PulseGen("/trig")
trig.firstLevel = 20.0
trig.firstWidth = 1.0
trig.firstDelay = 5.0
trig.secondWidth = 30.0
# Pulse generator with trigger mode = 1
pulse1 = moose.PulseGen("/pulse1")
pulse1.firstLevel = 50.0
pulse1.firstWidth = 3.0
pulse1.firstDelay = 5.0
pulse1.secondLevel = -20.0
pulse1.secondWidth = 5.0
pulse1.secondDelay = 8.0
pulse1.baseLevel = 10.0
pulse1.trigMode = 1
pulse1.trigTime = 0.0
trig.connect("outputSrc", pulse1, "input")
# Gated pulse
gate = moose.PulseGen("/gate")
gate.firstLevel = 20.0
gate.firstWidth = 30.0
gate.firstDelay = 15.0
gate.secondWidth = 30.0
# Pulse generator with trigger mode = 2
pulse2 = moose.PulseGen("/pulse2")
pulse2.firstLevel = 50.0
pulse2.firstWidth = 3.0
pulse2.firstDelay = 5.0
pulse2.secondLevel = -20.0
pulse2.secondWidth = 5.0
pulse2.secondDelay = 8.0
pulse2.baseLevel = 10.0
pulse2.trigMode = 2
gate.connect("outputSrc", pulse2, "input")
plot0 = moose.Table("/plot0")
plot0.stepMode = 3
plot0.connect("inputRequest", pulse0, "output")
plot1 = moose.Table("/plot1")
plot1.stepMode = 3
plot1.connect("inputRequest", pulse1, "output")
plot2 = moose.Table("/plot2")
plot2.stepMode = 3
plot2.connect("inputRequest", pulse2, "output")
plotGate = moose.Table("/plotGate")
plotGate.stepMode = 3
plotGate.connect("inputRequest", gate, "output")
plotTrig = moose.Table("/plotTrig")
plotTrig.stepMode = 3
plotTrig.connect("inputRequest", trig, "output")
context = moose.PyMooseBase.getContext()
context.useClock(0, "/#[TYPE=PulseGen]")
context.useClock(1, "/#[TYPE=Table]")
context.setClock(0, SIMDT)
context.setClock(1, SIMDT)
context.reset()
context.step(RUNTIME)
plot0.dumpFile("pulse0.plot")
plot1.dumpFile("pulse1.plot")
plot2.dumpFile("pulse2.plot")
plotGate.dumpFile("gate.plot")
plotTrig.dumpFile("trig.plot")
if has_pylab:
fig = pylab.figure()
pylab.subplot(511)
pylab.plot(numpy.array(plot0))
pylab.title('Free Run')
pylab.subplot(512)
pylab.plot(numpy.array(plot1))
pylab.title('Triggered (below)')
pylab.subplot(513)
pylab.plot(numpy.array(plotTrig))
pylab.title('Free Running Trigger')
pylab.subplot(514)
pylab.plot(numpy.array(plot2))
pylab.title('Gated (below)')
pylab.subplot(515)
pylab.plot(numpy.array(plotGate))
pylab.title('Free Running Gate')
pylab.show()
print '----------------------------------------------------------'
print 'pulsegen.py: data saved in pulse0.plot, pulse1.plot, pulse2.plot, gate.plot and trig.plot'
print "pulsegen.py: finished simulation"
|
BhallaLab/moose-thalamocortical
|
DEMOS/pymoose/pulsegen.py
|
Python
|
lgpl-2.1
| 3,288
|
[
"MOOSE"
] |
2d90b38eccbba575f7d52eb16c3f4712d4122030d0b229b3bd9c608bc81c930f
|
# -*- coding: utf-8 -*-
#
# vmd.training.pers documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 2 09:48:47 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.todo',
    # NOTE: the two entries below are third-party extensions; building the
    # docs requires the sphinxcontrib-gist and sphinxcontrib-images
    # packages to be installed alongside Sphinx.
    'sphinxcontrib.gist',
    'sphinxcontrib.images'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
def setup(app):
app.add_stylesheet("theme_overrides.css")
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'vmd.training.pers'
copyright = u'2015, Sandra & Sven'
author = u'Sandra & Sven'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'logo_only': True,}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = True
html_title = "Milieudefensie Webtraining basics"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "Webtraining"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/bagel.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
#html_style = 'theme_overrides.css'
#def setup(app):
# app.add_stylesheet("theme_overrides.css")
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'vmdtrainingpersdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'vmdtrainingpers.tex', u'vmd.training.pers Documentation',
u'Sandra \\& Sven', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'vmdtrainingpers', u'vmd.training.pers Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'vmdtrainingpers', u'vmd.training.pers Documentation',
author, 'vmdtrainingpers', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
milieudefensie/vmd.training.pers
|
source/conf.py
|
Python
|
gpl-2.0
| 11,648
|
[
"VMD"
] |
da245260ef2c5277322ebe1fc1b30cdf436683957ee0e0d9e5b63a747dc355b1
|
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import CertificateGenerationConfiguration, CertificateWhitelist, GeneratedCertificate
from certificates import api as certs_api
from util.date_utils import get_default_time_display
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
    """Course tab giving staff members access to the Instructor Dashboard."""
    type = "instructor"
    title = ugettext_noop('Instructor')
    view_name = "instructor_dashboard"
    # Not part of the static tab list; the tab is added dynamically
    # whenever is_enabled() reports the user may see it.
    is_dynamic = True

    @classmethod
    def is_enabled(cls, course, user=None):  # pylint: disable=unused-argument,redefined-outer-name
        """Return True when *user* has staff access to *course*."""
        if not user:
            return False
        return bool(has_access(user, 'staff', course, course.id))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
    """ Display the instructor dashboard for a course.

    Builds the list of dashboard sections the requesting user may see
    (based on staff/instructor/finance/sales/forum roles) and renders the
    instructor_dashboard_2 template.  Raises Http404 for non-staff users;
    returns a 500 when the course id cannot be parsed.
    """
    try:
        course_key = CourseKey.from_string(course_id)
    except InvalidKeyError:
        log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
        return HttpResponseServerError()

    course = get_course_by_id(course_key, depth=0)

    # Per-role access flags; individual sections are gated on these below.
    access = {
        'admin': request.user.is_staff,
        'instructor': bool(has_access(request.user, 'instructor', course)),
        'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
        'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
        'staff': bool(has_access(request.user, 'staff', course)),
        'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
    }

    # Course staff access is the minimum requirement for any dashboard view.
    if not access['staff']:
        raise Http404()

    is_white_label = CourseMode.is_white_label(course_key)

    # Sections every staff member sees; optional sections are appended below.
    sections = [
        _section_course_info(course, access),
        _section_membership(course, access, is_white_label),
        _section_cohort_management(course, access),
        _section_student_admin(course, access),
        _section_data_download(course, access),
    ]

    analytics_dashboard_message = None
    if settings.ANALYTICS_DASHBOARD_URL:
        # Construct a URL to the external analytics dashboard
        analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
        link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
        analytics_dashboard_message = _(
            "To gain insights into student enrollment and participation {link_start}"
            "visit {analytics_dashboard_name}, our new course analytics product{link_end}."
        )
        analytics_dashboard_message = analytics_dashboard_message.format(
            link_start=link_start, link_end="</a>", analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)

        # Temporarily show the "Analytics" section until we have a better way of linking to Insights
        sections.append(_section_analytics(course, access))

    # Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
    course_mode_has_price = False
    paid_modes = CourseMode.paid_modes_for_course(course_key)
    if len(paid_modes) == 1:
        course_mode_has_price = True
    elif len(paid_modes) > 1:
        # Multiple paid modes is a misconfiguration; e-commerce stays disabled.
        log.error(
            u"Course %s has %s course modes with payment options. Course must only have "
            u"one paid course mode to enable eCommerce options.",
            unicode(course_key), len(paid_modes)
        )

    if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
        sections.insert(3, _section_extensions(course))

    # Gate access to course email by feature flag & by course-specific authorization
    if bulk_email_is_enabled_for_course(course_key):
        sections.append(_section_send_email(course, access))

    # Gate access to Metrics tab by feature flag and staff authorization
    if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
        sections.append(_section_metrics(course, access))

    # Gate access to Ecommerce tab
    if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
        sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, is_white_label))

    # Gate access to Special Exam tab depending if either timed exams or proctored exams
    # are enabled in the course
    # NOTE: For now, if we only have proctored exams enabled, then only platform Staff
    # (user.is_staff) will be able to view the special exams tab. This may
    # change in the future
    can_see_special_exams = (
        ((course.enable_proctored_exams and request.user.is_staff) or course.enable_timed_exams) and
        settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False)
    )
    if can_see_special_exams:
        sections.append(_section_special_exams(course, access))

    # Certificates panel
    # This is used to generate example certificates
    # and enable self-generated certificates for a course.
    certs_enabled = CertificateGenerationConfiguration.current().enabled
    if certs_enabled and access['admin']:
        sections.append(_section_certificates(course))

    # Large courses hide the bulk instructor buttons in the UI.
    disable_buttons = not _is_small_course(course_key)

    certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
    certificate_exception_url = reverse(
        'create_certificate_exception',
        kwargs={'course_id': unicode(course_key), 'white_list_student': ''}
    )

    context = {
        'course': course,
        'old_dashboard_url': reverse('instructor_dashboard_legacy', kwargs={'course_id': unicode(course_key)}),
        'studio_url': get_studio_url(course, 'course'),
        'sections': sections,
        'disable_buttons': disable_buttons,
        'analytics_dashboard_message': analytics_dashboard_message,
        'certificate_white_list': certificate_white_list,
        'certificate_exception_url': certificate_exception_url
    }

    return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
    """Assemble the data backing the E-Commerce section of the dashboard."""
    course_key = course.id
    course_id_str = unicode(course_key)

    def _url(view_name):
        # Every e-commerce endpoint is keyed on the course id.
        return reverse(view_name, kwargs={'course_id': course_id_str})

    coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
    course_price = paid_mode.min_price

    # Only finance admins may see revenue totals.
    total_amount = None
    if access['finance_admin']:
        single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
        bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
        total_amount = single_purchase_total + bulk_purchase_total

    return {
        'section_key': 'e-commerce',
        'section_display_name': _('E-Commerce'),
        'access': access,
        'course_id': course_id_str,
        'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
        'ajax_remove_coupon_url': _url('remove_coupon'),
        'ajax_get_coupon_info': _url('get_coupon_info'),
        'get_user_invoice_preference_url': _url('get_user_invoice_preference'),
        'sale_validation_url': _url('sale_validation'),
        'ajax_update_coupon': _url('update_coupon'),
        'ajax_add_coupon': _url('add_coupon'),
        'get_sale_records_url': _url('get_sale_records'),
        'get_sale_order_records_url': _url('get_sale_order_records'),
        'instructor_url': _url('instructor_dashboard'),
        'get_registration_code_csv_url': _url('get_registration_codes'),
        'generate_registration_code_csv_url': _url('generate_registration_codes'),
        'active_registration_code_csv_url': _url('active_registration_codes'),
        'spent_registration_code_csv_url': _url('spent_registration_codes'),
        'set_course_mode_url': _url('set_course_mode_price'),
        'download_coupon_codes_url': _url('get_coupon_codes'),
        'enrollment_report_url': _url('get_enrollment_report'),
        'exec_summary_report_url': _url('get_exec_summary_report'),
        'list_financial_report_downloads_url': _url('list_financial_report_downloads'),
        'list_instructor_tasks_url': _url('list_instructor_tasks'),
        'look_up_registration_code': _url('look_up_registration_code'),
        'coupons': coupons,
        'sales_admin': access['sales_admin'],
        'coupons_enabled': coupons_enabled,
        'reports_enabled': reports_enabled,
        'course_price': course_price,
        'total_amount': total_amount
    }
def _section_special_exams(course, access):
    """Assemble the data backing the Special Exams section of the dashboard."""
    return {
        'section_key': 'special_exams',
        'section_display_name': _('Special Exams'),
        'access': access,
        'course_id': unicode(course.id)
    }
def _section_certificates(course):
    """Build the data for the certificates panel of the dashboard.

    The panel lets global staff generate example certificates and turn on
    self-generated certificates for a course.

    Arguments:
        course (Course)

    Returns:
        dict
    """
    example_cert_status = None
    html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
    if html_cert_enabled:
        can_enable_for_course = True
    else:
        example_cert_status = certs_api.example_certificates_status(course.id)

        # Self-generated certificates may only be enabled once a set of
        # example certificates has been generated successfully.  If the
        # course certificates are misconfigured (e.g. the PDF template has
        # not been uploaded yet) they must not be turned on for students.
        can_enable_for_course = (
            example_cert_status is not None and
            all(
                cert_status['status'] == 'success'
                for cert_status in example_cert_status
            )
        )
    instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)

    # All certificate endpoints are keyed on the course id; four of them use
    # the view name itself as the dict key.
    urls = {
        name: reverse(name, kwargs={'course_id': course.id})
        for name in (
            'generate_example_certificates',
            'enable_certificate_generation',
            'start_certificate_generation',
            'start_certificate_regeneration',
        )
    }
    urls['list_instructor_tasks_url'] = reverse('list_instructor_tasks', kwargs={'course_id': course.id})

    return {
        'section_key': 'certificates',
        'section_display_name': _('Certificates'),
        'example_certificate_status': example_cert_status,
        'can_enable_for_course': can_enable_for_course,
        'enabled_for_course': certs_api.cert_generation_enabled(course.id),
        'instructor_generation_enabled': instructor_generation_enabled,
        'html_cert_enabled': html_cert_enabled,
        'active_certificate': certs_api.get_active_web_certificate(course),
        'certificate_statuses': GeneratedCertificate.get_unique_statuses(course_key=course.id),
        'urls': urls,
    }
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
    """
    Set the new price on the course's 'honor' CourseMode and archive the
    previous price in the CourseModesArchive table.

    Returns a JSON message; 400 when the posted price is missing/non-numeric
    or when the course has no 'honor' mode.
    """
    try:
        # A missing 'course_price' field raises MultiValueDictKeyError (a
        # KeyError subclass).  Previously only ValueError was caught, so a
        # request without the field produced a 500 instead of this 400.
        course_price = int(request.POST['course_price'])
    except (ValueError, KeyError):
        return JsonResponse(
            {'message': _("Please Enter the numeric value for the course price")},
            status=400)  # status code 400: Bad Request

    currency = request.POST['currency']
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)

    # Only the 'honor' mode can be re-priced through this endpoint.
    course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
    if not course_honor_mode:
        return JsonResponse(
            {'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
            status=400)  # status code 400: Bad Request

    # Archive the current price/currency before overwriting, preserving the
    # price history for the course mode.
    CourseModesArchive.objects.create(
        course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
        min_price=getattr(course_honor_mode[0], 'min_price'), currency=getattr(course_honor_mode[0], 'currency'),
        expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
    )
    course_honor_mode.update(
        min_price=course_price,
        currency=currency
    )
    return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
    """ Provide data for the corresponding dashboard section.

    Returns a dict of basic course facts (dates, enrollment, grade cutoffs,
    modulestore errors) used by the Course Info panel.
    """
    course_key = course.id

    section_data = {
        'section_key': 'course_info',
        'section_display_name': _('Course Info'),
        'access': access,
        'course_id': course_key,
        'course_display_name': course.display_name,
        'has_started': course.has_started(),
        'has_ended': course.has_ended(),
        'start_date': get_default_time_display(course.start),
        'end_date': get_default_time_display(course.end) or _('No end date set'),
        'num_sections': len(course.children),
        'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
    }

    if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
        section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)

    if settings.ANALYTICS_DASHBOARD_URL:
        # Point staff at the external analytics dashboard for enrollment data.
        dashboard_link = _get_dashboard_link(course_key)
        message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
        section_data['enrollment_message'] = message

    if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
        section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})

    try:
        # Fold the cutoffs into one display string and strip the trailing
        # ", ".  NOTE: the lambda uses Python 2-only tuple-parameter
        # unpacking and the Python 2 builtin reduce().
        sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
        advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
        section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
    except Exception:  # pylint: disable=broad-except
        section_data['grade_cutoffs'] = "Not Available"
    # section_data['offline_grades'] = offline_grades_available(course_key)

    try:
        section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
    except Exception:  # pylint: disable=broad-except
        # Best-effort: never let a modulestore failure break the dashboard.
        section_data['course_errors'] = [('Error fetching errors', '')]

    return section_data
def _section_membership(course, access, is_white_label):
    """Assemble the data backing the Membership section of the dashboard."""
    course_id_str = unicode(course.id)

    def _url(view_name):
        return reverse(view_name, kwargs={'course_id': course_id_str})

    ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
    return {
        'section_key': 'membership',
        'section_display_name': _('Membership'),
        'access': access,
        'ccx_is_enabled': ccx_enabled,
        'is_white_label': is_white_label,
        # Enroll and unenroll are served by the same endpoint.
        'enroll_button_url': _url('students_update_enrollment'),
        'unenroll_button_url': _url('students_update_enrollment'),
        'upload_student_csv_button_url': _url('register_and_enroll_students'),
        'modify_beta_testers_button_url': _url('bulk_beta_modify_access'),
        'list_course_role_members_url': _url('list_course_role_members'),
        'modify_access_url': _url('modify_access'),
        'list_forum_members_url': _url('list_forum_members'),
        'update_forum_role_membership_url': _url('update_forum_role_membership'),
    }
def _section_cohort_management(course, access):
    """ Provide data for the corresponding cohort management section """
    key_str = unicode(course.id)
    section_data = {
        'section_key': 'cohort_management',
        'section_display_name': _('Cohorts'),
        'access': access,
        # NOTE: most cohort endpoints key on 'course_key_string', but the CSV
        # upload endpoint keys on 'course_id'.
        'course_cohort_settings_url': reverse('course_cohort_settings', kwargs={'course_key_string': key_str}),
        'cohorts_url': reverse('cohorts', kwargs={'course_key_string': key_str}),
        'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': key_str}),
        'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': key_str}),
    }
    return section_data
def _is_small_course(course_key):
    """ Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
    enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
    threshold = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
    # When no threshold is configured, no course is considered small.
    return threshold is not None and enrollment_count <= threshold
def _section_student_admin(course, access):
    """ Provide data for the corresponding dashboard section """
    course_key = course.id

    def _url(name):
        # All student-admin endpoints key on the stringified course id.
        return reverse(name, kwargs={'course_id': unicode(course_key)})

    section_data = {
        'section_key': 'student_admin',
        'section_display_name': _('Student Admin'),
        'access': access,
        'is_small_course': _is_small_course(course_key),
        'get_student_progress_url_url': _url('get_student_progress_url'),
        'enrollment_url': _url('students_update_enrollment'),
        'reset_student_attempts_url': _url('reset_student_attempts'),
        'reset_student_attempts_for_entrance_exam_url': _url('reset_student_attempts_for_entrance_exam'),
        'rescore_problem_url': _url('rescore_problem'),
        'rescore_entrance_exam_url': _url('rescore_entrance_exam'),
        'student_can_skip_entrance_exam_url': _url('mark_student_can_skip_entrance_exam'),
        'list_instructor_tasks_url': _url('list_instructor_tasks'),
        # NOTE(review): key name has a typo ('entrace') but templates consume
        # it as-is, so it must not be renamed here.
        'list_entrace_exam_instructor_tasks_url': _url('list_entrance_exam_instructor_tasks'),
        'spoc_gradebook_url': _url('spoc_gradebook'),
    }
    return section_data
def _section_extensions(course):
    """ Provide data for the corresponding dashboard section """
    course_id = unicode(course.id)

    def _url(name):
        return reverse(name, kwargs={'course_id': course_id})

    # (display title, usage-key string) for every unit that has a due date.
    units = [(title_or_url(unit), unicode(unit.location))
             for unit in get_units_with_due_date(course)]
    section_data = {
        'section_key': 'extensions',
        'section_display_name': _('Extensions'),
        'units_with_due_dates': units,
        'change_due_date_url': _url('change_due_date'),
        'reset_due_date_url': _url('reset_due_date'),
        'show_unit_extensions_url': _url('show_unit_extensions'),
        'show_student_extensions_url': _url('show_student_extensions'),
    }
    return section_data
def _section_data_download(course, access):
    """ Provide data for the corresponding dashboard section.

    Returns a dict of template context: section identity, access flags, and
    the URLs of every data-download endpoint for this course.
    """
    course_key = course.id

    def _url(name):
        # All data-download endpoints key on the stringified course id.
        return reverse(name, kwargs={'course_id': unicode(course_key)})

    # The proctored-exam report button needs both the platform feature flag
    # and the per-course setting to be enabled.
    show_proctored_report_button = (
        settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
        course.enable_proctored_exams
    )
    section_data = {
        'section_key': 'data_download',
        'section_display_name': _('Data Download'),
        'access': access,
        'show_generate_proctored_exam_report_button': show_proctored_report_button,
        'get_problem_responses_url': _url('get_problem_responses'),
        'get_grading_config_url': _url('get_grading_config'),
        'get_students_features_url': _url('get_students_features'),
        'get_issued_certificates_url': _url('get_issued_certificates'),
        'get_students_who_may_enroll_url': _url('get_students_who_may_enroll'),
        'get_anon_ids_url': _url('get_anon_ids'),
        'list_proctored_results_url': _url('get_proctored_exam_results'),
        'list_instructor_tasks_url': _url('list_instructor_tasks'),
        'list_report_downloads_url': _url('list_report_downloads'),
        'calculate_grades_csv_url': _url('calculate_grades_csv'),
        'problem_grade_report_url': _url('problem_grade_report'),
        # bool() replaces the redundant `True if ... else False`.
        'course_has_survey': bool(course.course_survey_name),
        'course_survey_results_url': _url('get_course_survey_results'),
    }
    return section_data
def null_applicable_aside_types(block):  # pylint: disable=unused-argument
    """
    get_aside method for monkey-patching into applicable_aside_types
    while rendering an HtmlDescriptor for email text editing. This returns
    an empty list.
    """
    # Intentionally ignores `block`: no asides should ever apply here.
    return list()
def _section_send_email(course, access):
    """ Provide data for the corresponding bulk email section.

    Renders a throwaway HtmlDescriptor in studio_view mode purely to obtain
    a rich-text editor fragment for composing bulk emails, then returns the
    template context for the Email dashboard section.
    """
    course_key = course.id

    # Monkey-patch applicable_aside_types to return no asides for the duration of this render
    with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
        # This HtmlDescriptor is only being used to generate a nice text editor.
        html_module = HtmlDescriptor(
            course.system,
            DictFieldData({'data': ''}),
            # Fake usage key: this module is never persisted to the modulestore.
            ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
        )
        fragment = course.system.render(html_module, 'studio_view')
    fragment = wrap_xblock(
        'LmsRuntime', html_module, 'studio_view', fragment, None,
        extra_data={"course-id": unicode(course_key)},
        usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
        # Generate a new request_token here at random, because this module isn't connected to any other
        # xblock rendering.
        request_token=uuid.uuid1().get_hex()
    )
    email_editor = fragment.content
    section_data = {
        'section_key': 'send_email',
        'section_display_name': _('Email'),
        'access': access,
        'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
        'editor': email_editor,
        'list_instructor_tasks_url': reverse(
            'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
        ),
        'email_background_tasks_url': reverse(
            'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
        ),
        'email_content_history_url': reverse(
            'list_email_content', kwargs={'course_id': unicode(course_key)}
        ),
    }
    return section_data
def _get_dashboard_link(course_key):
    """ Construct a URL to the external analytics dashboard """
    # Anchor text is the configured dashboard name; href points at the
    # course-specific page of the external dashboard.
    analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
    return u"<a href=\"{0}\" target=\"_blank\">{1}</a>".format(
        analytics_dashboard_url, settings.ANALYTICS_DASHBOARD_NAME)
def _section_analytics(course, access):
    """ Provide data for the corresponding dashboard section """
    analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course.id))
    link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
    # The translated message embeds a ready-made HTML link to the dashboard.
    insights_message = _("For analytics about your course, go to {analytics_dashboard_name}.").format(
        analytics_dashboard_name=u'{0}{1}</a>'.format(link_start, settings.ANALYTICS_DASHBOARD_NAME)
    )
    return {
        'section_key': 'instructor_analytics',
        'section_display_name': _('Analytics'),
        'access': access,
        'insights_message': insights_message,
    }
def _section_metrics(course, access):
    """Provide data for the corresponding dashboard section """
    course_key = course.id
    return {
        'section_key': 'metrics',
        'section_display_name': _('Metrics'),
        'access': access,
        'course_id': unicode(course_key),
        'sub_section_display_name': get_section_display_name(course_key),
        'section_has_problem': get_array_section_has_problem(course_key),
        # These three endpoints take no course kwarg; the course id is posted.
        'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
        'get_students_problem_grades_url': reverse('get_students_problem_grades'),
        'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
    }
|
ahmadiga/min_edx
|
lms/djangoapps/instructor/views/instructor_dashboard.py
|
Python
|
agpl-3.0
| 29,833
|
[
"VisIt"
] |
b3417377fde7ee96005d494ba4de6c018f420b82c75e2af82ffad528924ce10f
|
import numpy as np
import mymath as Mmath
import scipy.optimize as spo
import mydictionaries as Mdict
# Module-wide constants for building tabulated pair potentials.
# NOTE(review): `2**(1/6)` and `np.sqrt(3/2)` rely on Python 3 true division;
# under Python 2 they would evaluate with integer division (r0 == 1) — confirm
# the intended interpreter.
r0 = 2**(1/6) # equilibrium distance for nearest neighbor
npoints = 3000  # number of tabulated points
rmin, rmax = 0.1, 2.0 # range for tabulated potential
rvecmaster = np.linspace(rmin,rmax,npoints)
r1bartry = 1.05 # use morse to 5% strain
r1bartrywarner = 1.1 # use morse to 10% strain
# r2bartry = np.sqrt(17/12) # second-nearest neighbor distance in USF configuration
r2bartry = np.sqrt(3/2) # second-nearest neighbor distance in USF configuration
r3bartry = np.sqrt(2)-0.02 # second-nearest neighbor distance in equilibrium configuration, avoiding compression issue
alphabartry = 6.5*r0  # Morse exponent (in reduced units)

# summary of properties for brittle, ductile potentials (from simple lammps tests)
# these are not necessary for functioning of this module, but can be used by other modules
keys = ['brittle','inter4','inter3','inter2','inter1','ductile']
# cutoff -0.02 (new potential #1)
epsvecnew1 = [0.009,0.077,0.143,0.207,0.266,0.318]
usfvecnew1 = [0.8818,0.7344,0.6024,0.4816,0.3758,0.2824]
epsvecdict = {'new1': epsvecnew1}
usfvecdict = {'new1': usfvecnew1}
# potential with incorrect r2bar = sqrt(17/12)
# epsvecnew1 = [0.01,0.17,0.247,0.313,0.372,0.422]
# usfvecnew1 = [0.9165,0.7667,0.6299,0.5061,0.3952,0.3013]
def getAll(rvec=rvecmaster,style='New',extend=False,**kwargs): # pass ep for New Potential; pass r3bar for Warner
    """Build a tabulated pair potential over rvec.

    Returns (table, paramsdict) where table columns are
    (1-based index, r, energy, force), ready for file output.
    NOTE(review): a style other than 'New'/'Warner' leaves paramsfun
    undefined and raises a NameError — confirm intended.
    """
    if style == 'Warner': # old
        paramsfun, energyfun = solveEqnsWarner, getEnergyWarner
    elif style == 'New':
        paramsfun, energyfun = solveEqnsNew, getEnergyNew
    paramsdict = paramsfun(**kwargs)
    # Energies are evaluated on the reduced distance rbar = r / r0.
    energyvec = np.array(getEnergyLoop(rvec/r0,paramsdict,energyfun))
    # Force = -dE/dr, obtained by numerical differentiation.
    forcevec = Mmath.getDiffVec(rvec,-energyvec)
    if extend:
        # Optionally pad the table down to r ~ 0 by linear extrapolation.
        rvec, energyvec, forcevec = extendToZero(rvec,energyvec,forcevec)
    numvec = np.arange(np.shape(forcevec)[0])+1  # 1-based row indices
    return np.column_stack((numvec,rvec,energyvec,forcevec)), paramsdict
def getEnergyLoop(rbarvec, paramsdict, fun):
    """Evaluate fun at every reduced distance in rbarvec.

    paramsdict is expanded into keyword arguments on each call; a plain
    list of the results is returned.
    """
    energies = []
    for rbar in rbarvec:
        energies.append(fun(rbar, **paramsdict))
    return energies
def extendToZero(rvec, energyvec, forcevec, tol=1.e-8):
    """Prepend a point near r = 0 (at r = tol) to the tabulated arrays.

    The force is held constant at its first tabulated value; the energy is
    linearly extrapolated backwards with that force.
    """
    first_force = forcevec[0]
    # E(tol) from linear extrapolation: E0 + F0 * (r0 - tol).
    extrapolated_energy = first_force * (rvec[0] - tol) + energyvec[0]
    rvecnew = np.concatenate(([tol], rvec))
    energyvecnew = np.concatenate(([extrapolated_energy], energyvec))
    forcevecnew = np.concatenate(([first_force], forcevec))
    return rvecnew, energyvecnew, forcevecnew
def solveEqnsNew(ep,r1bar=r1bartry,r2bar=r2bartry,r3bar=r3bartry,alphabar=alphabartry,offset=0,step=0.01,tol=1.e-8,r3barfac=0.98,n1=3,n2=3):
    """Solve for the two spline segments of the 'New' potential.

    Shrinks the cutoff (via r3barfac) until the outer spline has no real
    root above `tol`, i.e. the tail stays non-positive. Returns a dict of
    the fitted parameters; prints 'Bad input' and returns None if no
    admissible cutoff is found.
    """
    # largest cutoff radius possible is r3bar
    # smallest cutoff radius possible is r2bar
    # tries to solve for largest cutoff radius possible
    # constructs smooth potential so that value of potential at r2bar is -ep
    # n1, n2 are order of splines
    while r3barfac > 0:
        r3barcurr = r2bar + (r3bar - r2bar)*r3barfac
        func = lambda coeff: solveEqnsSub(coeff,ep,r1bar,r2bar,r3barcurr,alphabar,offset,n1,n2)
        coeffsol = spo.fsolve(func,np.zeros(n1+n2+2))
        spline1, spline2 = coeffsol[0:n1+1], coeffsol[n1+1:n1+n2+2]
        maxval2 = Mmath.maxPolyRoot(spline2)
        if maxval2 > tol:
            # Outer spline crosses zero inside the range: shrink cutoff and retry.
            r3barfac = r3barfac - step
        else:
            keyslist = ['spline1','spline2','r1bar','r2bar','r3bar','alphabar','offset']
            valslist = [spline1,spline2,r1bar,r2bar,r3barcurr,alphabar,offset]
            return Mdict.instantiate(keyslist,valslist)
    print('Bad input')
def solveEqnsSub(coeff,ep,r1bar,r2bar,r3bar,alphabar,offset,n1,n2):
    """Residual vector for the spline-matching conditions of solveEqnsNew.

    Conditions: value/slope continuity with the Morse core at r1bar,
    depth -ep at r2bar for both splines, slope continuity at r2bar,
    zero value/slope at the cutoff r3bar, and curvature continuity at r2bar.
    NOTE(review): f has n1+n2+2 entries but indices 0..7 are hard-coded,
    so this assumes n1 = n2 = 3 (cubic splines) — confirm before using
    other spline orders.
    """
    spline1, spline2 = coeff[0:n1+1], coeff[n1+1:n1+n2+2]
    dspline1, dspline2 = np.polyder(spline1), np.polyder(spline2)
    ddspline1, ddspline2 = np.polyder(dspline1), np.polyder(dspline2)
    energy1 = morse(r1bar,alphabar,offset)
    denergy1 = morseD(r1bar,alphabar)
    f = np.zeros(n1+n2+2)
    f[0] = np.polyval(spline1,r1bar) - energy1
    f[1] = np.polyval(dspline1,r1bar) - denergy1
    f[2] = np.polyval(spline1,r2bar) - (-ep)
    f[3] = np.polyval(spline2,r2bar) - (-ep)
    f[4] = np.polyval(dspline1,r2bar) - np.polyval(dspline2,r2bar)
    f[5] = np.polyval(spline2,r3bar)
    f[6] = np.polyval(dspline2,r3bar)
    f[7] = np.polyval(ddspline1,r2bar) - np.polyval(ddspline2,r2bar) # unnecessary, but nice to have
    return np.array(f)
def getEnergyNew(rbar, spline1, spline2, r1bar, r2bar, r3bar, alphabar, offset):
    """Piecewise 'New' potential at reduced distance rbar.

    Morse core below r1bar, inner spline on [r1bar, r2bar), outer spline
    on [r2bar, r3bar), and zero beyond the cutoff r3bar.
    """
    if rbar >= r3bar:
        return 0  # beyond cutoff
    if rbar >= r2bar:
        return np.polyval(spline2, rbar)
    if rbar >= r1bar:
        return np.polyval(spline1, rbar)
    return morse(rbar, alphabar, offset)
def writeToFile(potentialname,data,paramsdict,filename=None,style='New',writeoption='w',writeformat='%d %10.8f %10.8f %10.8f'): # use 'a' for append, 'w' for (over)write
    """Write one tabulated potential (header + data rows) to a file.

    The file is opened twice on purpose: first in text mode to write the
    header, then in append-binary mode so np.savetxt appends the numeric
    table after it. `data` is the (index, r, energy, force) array from
    getAll().
    """
    if filename is None:
        filename = potentialname
    with open(filename,writeoption) as f:
        ndatapoints = np.shape(data)[0]
        writeHeader(potentialname,ndatapoints,f,paramsdict,style)
    with open(filename,'ab') as fb:
        np.savetxt(fb,data,fmt=writeformat)
def writeHeader(potentialname, ndatapoints, f, paramsdict, style):
    """Write the comment banner and table preamble for one potential.

    Emits two blank lines, an optional style-specific parameter comment,
    the potential's section name, and the 'N <count>' line.
    """
    # Pad with blank lines for readability when several potentials share a file.
    f.write('\n\n')
    if style == 'Warner':
        writeParamsWarner(f, **paramsdict)
    elif style == 'New':
        writeParamsNew(f, **paramsdict)
    f.write('%s\n' % potentialname)
    f.write('N %d \n \n' % ndatapoints)
def writeParamsNew(f,r1bar,r2bar,r3bar,alphabar,offset,**kwargs):
    """Write a comment line recording the 'New' potential's parameters.

    Reduced distances are converted back to physical units via the module
    constant r0 (alphabar is divided by r0). Extra kwargs (e.g. splines)
    are accepted and ignored so paramsdict can be passed wholesale.
    """
    f.write('# Tabulated potential, r1 = %f, r2 = %f, r3 = %f, offset = %f, alpha = %f \n \n' % (r1bar*r0,r2bar*r0,r3bar*r0,offset,alphabar/r0))
def morse(rbar, alphabar, offset=0):
    """Shifted Morse potential in reduced units.

    Minimum value of -1 - offset occurs at rbar = 1.
    """
    bond = np.exp(alphabar * (1 - rbar)) - 1
    return bond * bond - 1 - offset
def morseD(rbar, alphabar):
    """Derivative of morse() with respect to rbar (the offset drops out)."""
    expo = np.exp(alphabar * (1 - rbar))
    return -2 * alphabar * expo * (expo - 1)
# analogous functions for Warner potential...
def solveEqnsWarner(r3bar,r1bar=r1bartrywarner,alphabar=alphabartry,offset=0): # as r3bar (cutoff) increases, potential changes from brittle to ductile; brittle - r3bar = r2bartry; ductile - r3bar = 0.99*r3bartry (values outside bounds lead to bad potentials)
    """Fit the single cubic spline of the Warner-style potential.

    Solves the four matching conditions (value/slope continuity with the
    Morse core at r1bar, zero value/slope at the cutoff r3bar) and returns
    the parameters as a dict built by Mdict.instantiate.
    """
    func = lambda coeff: solveEqnsSubWarner(coeff,r1bar,r3bar,alphabar,offset)
    coeffsol = spo.fsolve(func,np.zeros(4))
    keyslist = ['spline1','r1bar','r3bar','alphabar','offset']
    valslist = [coeffsol,r1bar,r3bar,alphabar,offset]
    return Mdict.instantiate(keyslist,valslist)
def solveEqnsSubWarner(spline1, r1bar, r3bar, alphabar, offset):
    """Residuals for the Warner spline fit.

    Four conditions: value and slope continuity with the Morse core at
    r1bar, and zero value and slope at the cutoff r3bar.
    """
    dspline1 = np.polyder(spline1)
    residuals = [
        np.polyval(spline1, r1bar) - morse(r1bar, alphabar, offset),
        np.polyval(dspline1, r1bar) - morseD(r1bar, alphabar),
        np.polyval(spline1, r3bar),
        np.polyval(dspline1, r3bar),
    ]
    return np.array(residuals)
def getEnergyWarner(rbar, spline1, r1bar, r3bar, alphabar, offset):  # old method (Warner potential)
    """Warner-style potential: Morse core, one spline, zero beyond cutoff."""
    if rbar >= r3bar:
        return 0  # beyond cutoff
    if rbar >= r1bar:
        return np.polyval(spline1, rbar)
    return morse(rbar, alphabar, offset)
def writeParamsWarner(f,r1bar,r3bar,alphabar,offset,**kwargs):
    """Write a comment line recording the Warner potential's parameters.

    Reduced distances are scaled back to physical units via the module
    constant r0; extra kwargs are accepted and ignored.
    """
    f.write('# Tabulated potential, r1 = %f, r3 = %f, offset = %f, alpha = %f \n \n' % (r1bar*r0,r3bar*r0,offset,alphabar/r0))
|
varun-rajan/python-modules
|
Obsolete/mdutilities_warnerpotential3d.py
|
Python
|
gpl-2.0
| 7,339
|
[
"LAMMPS"
] |
3fe64bedc04ea6beed70e6fb5c69d907cb77b3de70546360ab51c6e2932a9ede
|
import matplotlib.pyplot as plt
#%matplotlib inline
import nengo
import numpy as np
import scipy.ndimage
import matplotlib.animation as animation
from matplotlib import pylab
from PIL import Image
import nengo.spa as spa
import cPickle
from nengo_extras.data import load_mnist
from nengo_extras.vision import Gabor, Mask
#Encode categorical integer features using a one-hot aka one-of-K scheme.
def one_hot(labels, c=None):
    """Return an (n, c) one-hot matrix for the 1-D integer vector `labels`.

    When c is None, the number of classes is inferred from the distinct
    label values.
    """
    assert labels.ndim == 1
    n_samples = labels.shape[0]
    n_classes = len(np.unique(labels)) if c is None else c
    encoded = np.zeros((n_samples, n_classes))
    encoded[np.arange(n_samples), labels] = 1
    return encoded
# Fixed seed so Gabor encoder generation is reproducible across runs.
rng = np.random.RandomState(9)

# --- load the data
img_rows, img_cols = 28, 28

(X_train, y_train), (X_test, y_test) = load_mnist()

X_train = 2 * X_train - 1  # normalize to -1 to 1
X_test = 2 * X_test - 1  # normalize to -1 to 1

train_targets = one_hot(y_train, 10)
test_targets = one_hot(y_test, 10)

# --- set up network parameters
# Want to encode and decode the image: input and output are both flattened
# 28x28 pixel vectors.
n_vis = X_train.shape[1]
n_out = X_train.shape[1]
# number of neurons/dimensions of semantic pointer
n_hid = 5000  # Try with more neurons for more accuracy
# n_hid = 1000

# Want the encoding/decoding done on the training images
ens_params = dict(
    eval_points=X_train,
    neuron_type=nengo.LIFRate(),  # Why not use LIF?
    intercepts=nengo.dists.Choice([-0.5]),
    max_rates=nengo.dists.Choice([100]),
)

# Least-squares solver with L2 regularization.
solver = nengo.solvers.LstsqL2(reg=0.01)
# solver = nengo.solvers.LstsqL2(reg=0.0001)
solver2 = nengo.solvers.LstsqL2(reg=0.01)

# Autoencoder-style network: ensemble `a` encodes the image, connection
# `conn` decodes it back to pixel space (identity function over X_train).
with nengo.Network(seed=3) as model:
    a = nengo.Ensemble(n_hid, n_vis, seed=3, **ens_params)
    v = nengo.Node(size_in=n_out)
    conn = nengo.Connection(
        a, v, synapse=None,
        eval_points=X_train, function=X_train,  # want the same thing out
        solver=solver)
    '''
    v2 = nengo.Node(size_in=train_targets.shape[1])
    conn2 = nengo.Connection(
        a, v2, synapse=None,
        eval_points=X_train, function=train_targets, #Want to get the labels out
        solver=solver2)
    '''
def get_outs(sim, images):
    """Decode label outputs for `images` via the label connection's weights.

    NOTE(review): references `conn2`, which is only defined inside the
    commented-out block above — calling this as-is raises NameError.
    Left untouched because the label pathway is currently disabled.
    """
    _, acts = nengo.utils.ensemble.tuning_curves(a, sim, inputs=images)
    return np.dot(acts, sim.data[conn2].weights.T)

# Disabled helpers that depend on get_outs / the label connection.
'''
def get_error(sim, images, labels):
    return np.argmax(get_outs(sim, images), axis=1) != labels

def get_labels(sim,images):
    return np.argmax(get_outs(sim, images), axis=1)
'''
#Get the neuron activity of an image or group of images (this is the semantic pointer in this case)
def get_activities(sim, images):
    """Return the ensemble's neuron activities (tuning-curve responses) for `images`."""
    _, acts = nengo.utils.ensemble.tuning_curves(a, sim, inputs=images)
    return acts
def get_encoder_outputs(sim,images):
    """Project `images` through the ensemble's encoders (pre-neuron currents).

    encoders has shape (n_hid, n_vis), so the transpose maps images
    (n, n_vis) to (n, n_hid).
    """
    outs = np.dot(images,sim.data[a].encoders.T) #before the neurons Why transpose?
    return outs
def intense(img):
    """Saturate pixel values to their sign: negatives -> -1, positives -> 1,
    zeros unchanged. Returns a new array; the input is not modified."""
    return np.sign(img)
def filtered(img):
    """Gaussian-blur `img` (sigma=1) and then saturate each pixel to +/-1 via intense()."""
    return intense(scipy.ndimage.gaussian_filter(img, sigma=1))
#Images to train, starting at random orientation
orig_imgs = X_train[:100000].copy()
for img in orig_imgs:
    # Rotate each flattened image by a random angle in-place, then
    # blur + binarize with filtered().
    img[:] = filtered(scipy.ndimage.interpolation.rotate(np.reshape(img, (28, 28)),
                      (np.random.randint(360)), reshape=False, mode="nearest").ravel())

degrees = -6  # fixed rotation step (clockwise) applied on top of the random orientation

#Images rotated a fixed amount from the original random orientation
rotated_imgs = orig_imgs.copy()
for img in rotated_imgs:
    img[:] = filtered(scipy.ndimage.interpolation.rotate(np.reshape(img, (28, 28)),
                      degrees, reshape=False, mode="nearest").ravel())
#^encoder outputs

'''#Images not used for training, but for testing (all at random orientations)
test_imgs = X_test[:1000].copy()
for img in test_imgs:
    img[:] = scipy.ndimage.interpolation.rotate(np.reshape(img,(28,28)),
                (np.random.randint(360)),reshape=False,mode="nearest").ravel()
'''

# linear filter used for edge detection as encoders, more plausible for human visual system
encoders = Gabor().generate(n_hid, (11, 11), rng=rng)
encoders = Mask((28, 28)).populate(encoders, rng=rng, flatten=True)
#Set the ensembles encoders to this
a.encoders = encoders

#Check the encoders were correctly made
#plt.imshow(encoders[0].reshape(28, 28), vmin=encoders[0].min(), vmax=encoders[0].max(), cmap='gray')
# Run the (static) network once to obtain activities and encoder outputs,
# then solve for the rotation weight matrix and pickle it.
with nengo.Simulator(model) as sim:
    #Neuron activities of different mnist images
    #The semantic pointers
    orig_acts = get_activities(sim, orig_imgs)
    #rotated_acts = get_activities(sim,rotated_imgs)
    #test_acts = get_activities(sim,test_imgs)
    #X_test_acts = get_activities(sim,X_test)
    #labels_out = get_outs(sim,X_test)
    rotated_after_encoders = get_encoder_outputs(sim, rotated_imgs)
    #original_after_encoders = get_encoder_outputs(sim,orig_imgs)

    #solvers for a learning rule
    #solver_tranform = nengo.solvers.LstsqL2(reg=1e-8)
    #solver_word = nengo.solvers.LstsqL2(reg=1e-8)
    solver_rotate_encoder = nengo.solvers.LstsqL2(reg=1e-8)
    #solver_identity_encoder = nengo.solvers.LstsqL2(reg=1e-8)

    #find weight matrix between neuron activity of the original image and the rotated image
    #weights returns a tuple including information about learning process, just want the weight matrix
    #weights,_ = solver_tranform(orig_acts, rotated_acts)

    #find weight matrix between labels and neuron activity
    #label_weights,_ = solver_word(labels_out,X_test_acts)

    # Least-squares map from original activities to the rotated images'
    # encoder outputs; the second tuple element (solver info) is discarded.
    rotated_after_encoder_weights, _ = solver_rotate_encoder(orig_acts, rotated_after_encoders)
    #identity_after_encoder_weights,_ = solver_identity_encoder(orig_acts,original_after_encoders)

#filename = "label_weights_clockwise" + str(n_hid) +".p"
#cPickle.dump(label_weights, open( filename, "wb" ) )
#filename = "activity_to_img_weights_clockwise" + str(n_hid) +".p"
#cPickle.dump(sim.data[conn].weights.T, open( filename, "wb" ) )
#filename = "rotation_weights_clockwise" + str(n_hid) +".p"
#cPickle.dump(weights, open( filename, "wb" ) )

# Persist the learned rotation weights for use by the recurrent model.
filename = "rotated_after_encoder_weights_clockwise_filter" + str(n_hid) + ".p"
cPickle.dump(rotated_after_encoder_weights, open(filename, "wb"))

#filename = "identity_after_encoder_weights" + str(n_hid) +".p"
#cPickle.dump(identity_after_encoder_weights, open( filename, "wb" ) )
|
science-of-imagination/nengo-buffer
|
Project/mental_rotation_training_clockwise.py
|
Python
|
gpl-3.0
| 6,347
|
[
"NEURON"
] |
15f7b1ad699203340775b2eb0178842809037b484621b6b5508576347b2b87f2
|
# Copyright (C) 2012,2013,2015,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**************************
espressopp.FixedTripleList
**************************
.. function:: espressopp.FixedTripleList(storage)
:param storage:
:type storage:
.. function:: espressopp.FixedTripleList.add(pid1, pid2, pid3)
:param pid1:
:param pid2:
:param pid3:
:type pid1:
:type pid2:
:type pid3:
:rtype:
.. function:: espressopp.FixedTripleList.addTriples(triplelist)
:param triplelist:
:type triplelist:
:rtype:
.. function:: espressopp.FixedTripleList.getTriples()
:rtype:
.. function:: espressopp.FixedTripleList.size()
:rtype:
.. function:: espressopp.FixedTripleList.remove()
remove the FixedTripleList and disconnect
"""
from espressopp import pmi
import _espressopp
import espressopp
from espressopp.esutil import cxxinit
class FixedTripleListLocal(_espressopp.FixedTripleList):
    """Worker-side wrapper around the C++ FixedTripleList.

    Every method is guarded by pmi.workerIsActive() so calls are no-ops
    (returning None) on ranks where the PMI worker is inactive.
    """

    def __init__(self, storage):
        # Construct the underlying C++ object only on active worker ranks.
        if pmi.workerIsActive():
            cxxinit(self, _espressopp.FixedTripleList, storage)

    def add(self, pid1, pid2, pid3):
        """Add a single particle triple (by particle ids)."""
        if pmi.workerIsActive():
            return self.cxxclass.add(self, pid1, pid2, pid3)

    def addTriples(self, triplelist):
        """
        Each processor takes the broadcasted triplelist and
        adds those triples whose first particle is owned by
        this processor.
        """
        if pmi.workerIsActive():
            for triple in triplelist:
                pid1, pid2, pid3 = triple
                self.cxxclass.add(self, pid1, pid2, pid3)

    def size(self):
        """Return the number of triples stored on this rank."""
        if pmi.workerIsActive():
            return self.cxxclass.size(self)

    def remove(self):
        """Remove the list and disconnect it from the storage signals."""
        if pmi.workerIsActive():
            self.cxxclass.remove(self)

    # NOTE(review): dead duplicate of addTriples kept as a string literal;
    # candidate for deletion.
    '''
    def addTriples(self, triplelist):
        """
        Each processor takes the broadcasted triplelist and
        adds those triples whose first particle is owned by
        this processor.
        """
        if pmi.workerIsActive():
            for triple in triplelist:
                pid1, pid2, pid3 = triple
                self.cxxclass.add(self, pid1, pid2, pid3)
    '''

    def getTriples(self):
        """Return the triples stored on this rank."""
        if pmi.workerIsActive():
            triples = self.cxxclass.getTriples(self)
            return triples
# Controller side: expose FixedTripleList as a PMI proxy that forwards
# calls to the per-rank FixedTripleListLocal instances.
if pmi.isController:
    class FixedTripleList(object):
        """PMI proxy: 'localcall' runs on the controller's local object,
        'pmicall' broadcasts to all workers, 'pmiinvoke' broadcasts and
        gathers the return values."""
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.FixedTripleListLocal',
            localcall = [ "add" ],
            pmicall = [ "addTriples","remove" ],
            pmiinvoke = ["getTriples", "size"]
        )
|
kkreis/espressopp
|
src/FixedTripleList.py
|
Python
|
gpl-3.0
| 3,429
|
[
"ESPResSo"
] |
65e4a18c9dc97b58074d5a4250a6a9d6c8fb66614a925563427bcb2415d1e14a
|
#!/usr/bin/env python
"""verification_utils.py:
It contains a class which runs tests on MOOSE internal data structures to
check if it is good for simulation.
Last modified: Sun Feb 15, 2015 12:21PM
"""
from __future__ import print_function
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import sys
import sys
from . import _moose as moose
import unittest
import inspect
from . import print_utils as debug
import numpy as np
from .backend import backend
class MooseTestCase( unittest.TestCase ):
    # Sanity checks over the MOOSE model graph. These "tests" report
    # problems via debug.dump rather than assertions, so a failing check
    # does not abort the suite (except test_clocks, which exits).

    def dump(self, msg, end=''):
        ''' Dump the messages in test functions '''
        caller = inspect.stack()[1][3]  # name of the calling test method
        if type(msg) == list:
            msg = '\n\t|- '.join(msg)
        print(('[VERIFY] {:80s}[{}]'.format(msg, caller)))

    def setUp(self):
        '''Initialize storehouse
        '''
        # Populate the element cache once; reused by every test.
        if not backend.moose_elems.filled:
            backend.moose_elems.populateStoreHouse()

        self.mooseElems = backend.moose_elems
        self.nonZeroClockIds = None

    def test_disconnected_compartments(self):
        '''Test if any compartment is not connected '''
        self.dump("Checking if any compartment is not connected ...")
        for c in self.mooseElems.compartments:
            # A compartment is considered connected if it has an axial/raxial
            # neighbor or an injected-current source.
            if (c.neighbors['axial'] or c.neighbors['raxial']):
                continue
            elif c.neighbors['injectMsg']:
                continue
            else:
                msg = '%s is not connected with any other compartment' % c.path
                debug.dump('FAIL'
                        , [ msg
                            , 'Did you forget to use `moose.connect`?'
                            ]
                        )

    def test_isolated_pulse_gen(self):
        ''' Test if any pulse-generator is not injecting current to a
        compartment
        '''
        self.dump('Checking if any pulse-generator is floating')
        for pg in self.mooseElems.pulseGens:
            if pg.neighbors['output']:
                continue
            else:
                debug.dump(
                        'FAIL'
                        , [ 'Current source {} is floating'.format(pg.path)
                            , 'It is not injecting current to any compartment'
                            , 'Perhaps you forgot to use `moose.connect`?'
                            ]
                        )

    def test_synchans(self):
        self.dump("Checking if any synapse is dead")
        for synchan in self.mooseElems.synchans:
            # Non-positive conductance makes the channel inert.
            if synchan.Gbar <= 0.0:
                debug.dump("WARN"
                        , [ synchan.path
                            , "Gbar value is zero or negative: %s" % synchan.Gbar
                            , "Not cool!"
                            ]
                        )
            # Check the output of synchan.
            if not synchan.neighbors['channel']:
                debug.dump("FAIL"
                        , [ "SynChan %s is not connected to post-compartment" % synchan.path
                            , " No connected 'channel'. "
                            " Did you forget to connect compartment e.g."
                            "moose.connect(synchan, 'channel', comp, 'channel')"
                            " where synchan is 'moose.SynChan' and comp is "
                            " 'moose.Compartment'?"
                            ]
                        )
            else:
                pass
            # Check if anyone is activating this synchan.
            synhandlers = synchan.neighbors['activation']
            if not synhandlers:
                debug.dump("FAIL"
                        , [ "No SynHandler is activating SynChan %s" % synchan.path
                            , " Did you forget to connect a SynHandler e.g. "
                            "moose.connect(synHandler, 'activationOut', synchan, 'activation'"
                            " where synchan is 'moose.SynChan' and synHandler is"
                            " moose.SynHandler."
                            ]
                        )
            else: [self.test_synhandler(x) for x in synhandlers]

    def test_synhandler(self, synhandlers):
        """A SynHandler object does not have incoming connections to itself.
        Rather it keeps an array of moose.Synapse inside it which recieves input
        of moose.SpikeGen.
        """
        # Accepts either a single handler or a moose.vec of handlers.
        if type(synhandlers) == moose.vec:
            if len(synhandlers) == 1:
                synhandler = synhandlers[0]
            else:
                [self.test_synhandler(x) for x in synhandlers]
        else:
            synhandler = synhandlers
        # NOTE(review): when a multi-element vec is passed, `synhandler`
        # stays unbound and the loop below raises NameError — confirm.
        for synapses in synhandler.synapse:
            self.test_synapse(synapses)

    def test_synapse(self, synapses):
        if type(synapses) == moose.Synapse:
            synapse = synapses
        elif type(synapses) == moose.vec:
            if len(synapses) == 1:
                synapse = synapses[0]
            else:
                # NOTE(review): after this recursion, control still falls
                # through with `synapse` unbound (NameError) — confirm.
                [ self.test_synapse(x) for x in synapses ]
        spikeGens = synapse.neighbors['addSpike']
        if not spikeGens:
            debug.dump('FAIL'
                    , [" Synapse %s has no incoming spikes" % synapse.path
                        , " Did you forget to connect a moose.SpikeGen e.g."
                        " moose.connect(spikegen, 'spikeOut', synapse, 'addSpike')"
                        ]
                    )
        else:
            [self.test_spikegen(x) for x in spikeGens]

    def test_spikegen(self, spikegens):
        spikeGen = None
        # NOTE(review): len() is evaluated before the type check, so a bare
        # moose.SpikeGen without __len__ would raise here — confirm.
        if len(spikegens) > 1:
            [self.test_spikegen(x) for x in spikegens]
        elif len(spikegens) == 1:
            spikeGen = spikegens[0]
        elif type(spikegens) == moose.SpikeGen:
            spikeGen = spikegens
        pre = spikeGen.neighbors['Vm']
        if not pre:
            debug.dump('FAIL',
                    [ "SpikeGen %s is not reading Vm of any compartment " % spikeGen.path
                        , "Did you forget to connect Vm of a "
                        "compartment to this SpikeGen? "
                        " e.g. moose.connect(comp, 'VmOut', spikeGen, 'Vm')"
                        ]
                    )
        else: pass

    def test_unused_tables(self):
        '''Tests if any table is not reading data. Such tables remain empty.
        '''
        self.dump('Checking if any table is not connected')
        for table in self.mooseElems.tables:
            if table.neighbors['requestOut']:
                continue
            else:
                debug.dump(
                        'FAIL'
                        , [ 'Table {} is not reading data.'.format(table.path)
                            , ' Did you forget to use `moose.connect`?'
                            ]
                        )

    def test_clocks(self):
        """Tests if clocks are missing. """
        self.dump("Checking if clocks are available")
        try:
            clock = self.mooseElems.clocks[0]
        except:
            debug.dump("WARN", "Could not find any clock")
            return
        clockDtList = clock.dts
        if np.count_nonzero(clockDtList) < 1:
            debug.dump("FATAL"
                    , [ "No clock is found with non-zero dt size. "
                        , "Did you forget to use `moose.setClock` function?"
                        , "Quitting..."
                        ]
                    )
            # NOTE(review): exits with status 0 even though this is FATAL.
            sys.exit(0)
        else:
            self.nonZeroClockIds = np.nonzero(self.mooseElems.clocks)

    def test_methods_sensitivity(self):
        """Test if each compartment has process connected to a non-zero clock"""
        self.dump("Checking for insensitive processes")
        [ self.checkSentitivity( m, objs)
                for m in ['process', 'init']
                for objs in [self.mooseElems.compartments]
                ]
        [self.checkSentitivity('process', objs)
                for objs in [self.mooseElems.tables, self.mooseElems.pulseGens]
                ]

    def checkSentitivity( self, methodName, objectList):
        """Check if a given method is sensitive to any non-zero clock
        """
        assert type(methodName) == str
        insensitiveObjectList = []
        for obj in objectList:
            if not obj.neighbors[methodName]:
                insensitiveObjectList.append(obj)
            else:
                # Here we must check if method is made sensitive to a
                # zero-clock. Currently there is no way to test it in python.
                pass
        if len(insensitiveObjectList) > 0:
            msgList = [
                    "Method `%s` is insensitive to all clocks. " % methodName
                    , "Total {} out of {} object ({}) fails this test".format(
                        len(insensitiveObjectList)
                        , len(objectList)
                        , type(insensitiveObjectList[0])
                        )
                    ]
            debug.dump("FAIL", msgList)
def verify(*args, **kwargs):
    '''Verify the current moose setup. Emit errors and warnings
    '''
    connectivity_tests = [
        'test_disconnected_compartments',
        'test_isolated_pulse_gen',
        'test_unused_tables',
        'test_synchans',
    ]
    simulation_tests = ['test_clocks', 'test_methods_sensitivity']
    connectivitySuite = unittest.TestSuite(
        MooseTestCase(name) for name in connectivity_tests)
    simulationSuite = unittest.TestSuite(
        MooseTestCase(name) for name in simulation_tests)
    # We can replace debug() with run() and collect the outcome into a
    # TestResult object; debug() just executes each test in order.
    connectivitySuite.debug()
    simulationSuite.debug()
|
rahulgayatri23/moose-core
|
python/moose/verification_utils.py
|
Python
|
gpl-3.0
| 10,069
|
[
"MOOSE"
] |
f1e86205f2a0749564d2f686c88e34f634ec397c32804a3b53b8eaa1d24c5653
|
'''
Created on Apr 28, 2011
@author: mkiyer
'''
from chimerascan import pysam
from math import log10
from string import maketrans
def get_solexa_qual_conversion_table():
    """
    return a translation table that can be used by str.translate() for
    converting solexa to sanger quality scores
    """
    offset = 64
    # Everything below the Solexa offset clamps to the lowest sanger quality
    # ('!'); everything at or above starts at the cap ('I') and the loop
    # below fills in the real conversions for scores -5..39.
    table = ['!'] * offset + ['I'] * (256 - offset)
    for sol_q in range(-5, 40):
        # Solexa scores are odds-based; convert to the probability-based
        # Phred scale before re-encoding with the sanger +33 offset.
        phred_q = 10 * log10(1 + 10 ** (sol_q / 10.0))
        table[offset + sol_q] = chr(int(round(33 + phred_q)))
    identity = ''.join(map(chr, range(256)))
    return maketrans(identity, ''.join(table))
def get_illumina_qual_conversion_table():
    """Illumina 1.3+ format can encode a Phred quality score from 0 to 62
    using ASCII 64 to 126 (although in raw read data Phred scores from 0
    to 40 only are expected).
    """
    offset = 64
    table = ['!'] * 256
    # Shift each Illumina score (ASCII 64+q) down to sanger (ASCII 33+q).
    for q in range(62):
        table[offset + q] = chr(33 + q)
    # Scores above 40 are not expected in raw reads; clamp them to 'I'.
    table[offset + 40:] = 'I' * (256 - (offset + 40))
    return maketrans(''.join(map(chr, range(256))), ''.join(table))
def get_sanger_qual_conversion_table():
    """Return a translation table that clamps sanger quality characters
    into the expected range (quality 0..40)."""
    offset = 33
    identity = ''.join(map(chr, range(256)))
    table = list(identity)
    # Anything below the sanger offset is raised to quality 0 ('!') ...
    table[:offset] = '!' * offset
    # ... and anything above quality 40 is capped at 'I'.
    table[offset + 40:] = 'I' * (256 - (offset + 40))
    return maketrans(identity, ''.join(table))
# Precomputed str.translate() tables, keyed by FASTQ quality-encoding name.
conv_tables = {"sanger": get_sanger_qual_conversion_table(),
               "illumina": get_illumina_qual_conversion_table(),
               "solexa": get_solexa_qual_conversion_table()}
def parse_fastq(line_iter):
    """Yield (id, sequence, quality) tuples from an iterator of FASTQ lines.

    The iterator is closed (via its context manager) when parsing finishes.
    A trailing partial record (truncated file) is discarded rather than
    raising.
    """
    with line_iter:
        while True:
            try:
                # one 4-line FASTQ record: @id, sequence, '+', quality
                rid = next(line_iter).rstrip()[1:]
                seq = next(line_iter).rstrip()
                next(line_iter)  # skip the '+' separator line
                qual = next(line_iter).rstrip()
            except StopIteration:
                # Letting StopIteration escape a generator raises
                # RuntimeError under PEP 479 (Python 3.7+); return cleanly
                # instead.  next() also replaces the Python-2-only .next().
                return
            yield rid, seq, qual
def fastq_to_bam(fastq_files, qual_format, bam_file):
    """Convert one or more FASTQ files into an unaligned BAM file.

    fastq_files: list of FASTQ paths; file i supplies read i of each
                 fragment (read1, read2, ...)
    qual_format: one of 'sanger', 'illumina', 'solexa' (see conv_tables)
    bam_file:    output BAM path

    Stops as soon as the shortest input file is exhausted.
    """
    fqfhs = [parse_fastq(open(f)) for f in fastq_files]
    qual_trans_table = conv_tables[qual_format]
    header = {'HD': {'VN': '1.0', 'SO': 'unknown'}}
    # 'SQ': [{'LN': 1, 'SN': 'dummy'}]}
    bamfh = pysam.Samfile(bam_file, "wb", header=header)
    try:
        while True:
            for i, fqiter in enumerate(fqfhs):
                # 'read_id' avoids shadowing the builtin id(); next() works
                # for both Python 2 and 3 iterators.
                read_id, seq, qual = next(fqiter)
                a = pysam.AlignedRead()
                a.rname = -1   # unmapped: no reference sequence
                a.mrnm = -1
                #a.pos = 0
                #a.mpos = 0
                a.qname = read_id
                a.seq = seq
                # re-encode qualities into sanger (+33) encoding
                a.qual = qual.translate(qual_trans_table)
                a.is_read1 = (i == 0)
                a.is_read2 = (i == 1)
                bamfh.write(a)
    except StopIteration:
        # shortest input FASTQ exhausted -- stop emitting records
        pass
    finally:
        # close the BAM file even if an unexpected error occurs
        bamfh.close()
def bam_to_fastq(bam_file, fastq_files):
    """Write the reads in a BAM file back out as FASTQ.

    read1/read2 records go to fastq_files[0]/fastq_files[1]; unpaired
    records (neither flag set) go to fastq_files[0].
    """
    fqfhs = [open(f, "w") for f in fastq_files]
    bamfh = pysam.Samfile(bam_file, "rb")
    for r in bamfh:
        if r.is_read1:
            i = 0
        elif r.is_read2:
            i = 1
        else:
            # BUG FIX: the original left 'i' unbound here, so single-end
            # data raised NameError (or reused a stale index); route
            # unpaired reads to the first output file.
            i = 0
        record = "@%s\n%s\n+\n%s" % (r.qname, r.seq, r.qual)
        # .write() replaces the Python-2-only "print >>" statement;
        # the trailing newline matches what print added.
        fqfhs[i].write(record + "\n")
    # flush/close handles explicitly rather than relying on GC
    bamfh.close()
    for fh in fqfhs:
        fh.close()
if __name__ == '__main__':
    # Ad-hoc smoke test: requires read1.fq/read2.fq in the current directory
    # and writes hi.bam, a1.fq, a2.fq alongside them.
    sol2std = get_solexa_qual_conversion_table()
    illumina2std = get_illumina_qual_conversion_table()
    import sys
    fastq_to_bam(["read1.fq", "read2.fq"], "solexa", "hi.bam")
    bam_to_fastq("hi.bam", ["a1.fq", "a2.fq"])
|
genome-vendor/chimerascan
|
chimerascan/lib/fastq_to_bam.py
|
Python
|
gpl-3.0
| 3,324
|
[
"pysam"
] |
633319b282ed1d90048a3c9ed19b41111df37140848758b6e4e52a7cb5ad2775
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Unit tests for the CJSON writer."""
import json
import os
import unittest
from math import sqrt
import cclib
__filedir__ = os.path.dirname(__file__)
__filepath__ = os.path.realpath(__filedir__)
__datadir__ = os.path.join(__filepath__, "..", "..")
class CJSONWriterTest(unittest.TestCase):
    """Unit tests for the CJSON writer."""

    def test_init(self):
        """Does the class initialize correctly?"""
        fpath = os.path.join(__datadir__, "data/ADF/basicADF2007.01/dvb_gopt.adfout")
        ccdata = cclib.io.ccread(fpath)
        writer = cclib.io.cjsonwriter.CJSON(ccdata)
        # The writer should keep the ccData instance passed to its constructor.
        self.assertEqual(writer.ccdata, ccdata)

    def test_cjson_generation(self):
        """Does the CJSON format get generated properly?"""
        fpath = os.path.join(__datadir__, "data/ADF/basicADF2007.01/NH3.adfout")
        ccdata = cclib.io.ccread(fpath)
        serialized = cclib.io.cjsonwriter.CJSON(ccdata).generate_repr()
        parsed = json.loads(serialized)
        # The data available in the cjson and ccdata objects should be equal.
        natom = parsed['properties']['number of atoms']
        self.assertEqual(natom, ccdata.natom)
        self.assertAlmostEqual(
            parsed['properties']['total dipole moment'],
            sqrt(sum(ccdata.moments[1] ** 2)),
        )
        # Ensure the bond connectivity index starts from 0
        bonds = parsed.get('bonds', None)
        self.assertIsNotNone(bonds)
        indices = bonds['connections']['index']
        self.assertEqual(min(indices), 0)
        self.assertTrue(max(indices) < natom)

    def test_zero_dipole_moment(self):
        """Does the CJSON writer handle zero dipole moment correctly?"""
        fpath = os.path.join(__datadir__, "data/GAMESS/basicGAMESS-US2017/C_bigbasis.out")
        ccdata = cclib.io.ccopen(fpath).parse()
        parsed = json.loads(cclib.io.cjsonwriter.CJSON(ccdata).generate_repr())
        self.assertAlmostEqual(parsed["properties"]['total dipole moment'], 0.0)

    def test_missing_dipole_moment(self):
        """Does the CJSON writer handle missing properties correctly?"""
        fpath = os.path.join(__datadir__, "data/GAMESS/basicGAMESS-US2017/C_bigbasis.out")
        ccdata = cclib.io.ccopen(fpath).parse()
        # simulate a parse that produced no dipole data
        del ccdata.moments
        parsed = json.loads(cclib.io.cjsonwriter.CJSON(ccdata).generate_repr())
        self.assertFalse("total dipole moment" in parsed["properties"])
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
langner/cclib
|
test/io/testcjsonwriter.py
|
Python
|
bsd-3-clause
| 2,855
|
[
"ADF",
"GAMESS",
"cclib"
] |
f4bee3221f030ec2e09a298dbd74206f166ddd9f15fb5a2f0c7dba7de7672e39
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RStatmod(RPackage):
    """A collection of algorithms and functions to aid statistical
    modeling. Includes growth curve comparisons, limiting dilution
    analysis (aka ELDA), mixed linear models, heteroscedastic
    regression, inverse-Gaussian probability calculations, Gauss
    quadrature and a secure convergence algorithm for nonlinear
    models. Includes advanced generalized linear model functions
    that implement secure convergence, dispersion modeling and
    Tweedie power-law families."""
    # CRAN package metadata: landing page, source tarball, and archive listing.
    homepage = "https://cloud.r-project.org/package=statmod"
    url      = "https://cloud.r-project.org/src/contrib/statmod_1.4.30.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/statmod"
    # Known-good releases, pinned by tarball checksum.
    version('1.4.32', sha256='2f67a1cfa66126e6345f8a40564a3077d08f1748f17cb8c8fb05c94ed0f57e20')
    version('1.4.30', sha256='9d2c1722a85f53623a9ee9f73d835119ae22ae2b8ec7b50d675401e314ea641f')
    # statmod requires R >= 3.0.0 both at build time and at runtime.
    depends_on('r@3.0.0:', type=('build', 'run'))
|
rspavel/spack
|
var/spack/repos/builtin/packages/r-statmod/package.py
|
Python
|
lgpl-2.1
| 1,194
|
[
"Gaussian"
] |
10f0adcbc7d44ba1b2a2a174427b35f8b551d681f48322a7ed7c3e20b212e1e7
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RXvector(RPackage):
    """Memory efficient S4 classes for storing sequences "externally" (behind
    an R external pointer, or on disk)."""
    # Bioconductor package metadata: landing page and git source.
    homepage = "https://bioconductor.org/packages/XVector/"
    git      = "https://git.bioconductor.org/packages/XVector.git"
    # Pinned to a specific Bioconductor release commit.
    version('0.16.0', commit='54615888e1a559da4a81de33e934fc0f1c3ad99f')
    # Bioconductor dependencies needed both at build time and at runtime.
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-zlibbioc', type=('build', 'run'))
    # XVector 0.16.0 only supports the R 3.4.x series.
    depends_on('r@3.4.0:3.4.9', when='@0.16.0')
|
krafczyk/spack
|
var/spack/repos/builtin/packages/r-xvector/package.py
|
Python
|
lgpl-2.1
| 1,870
|
[
"Bioconductor"
] |
d2816c160afbf59e30876f50133c1d55e59fa4423af6156c5a9debf9c78b0380
|
from shapes import *
import random
def main():
    """Run the neuron mitochondria transport animation.

    Builds the window layout (model area, label column, arrow legend),
    draws the two halves of the neuron body, then animates mitochondria
    moving through it until the window is clicked.
    """
    random.seed()
    # Create window
    main_window = Container(0, 600, 0, 250, 25)
    win = GraphWin("Neuron Mitochondria", main_window.xMax, main_window.yMax, autoflush=False)
    # Buffer values for model, label, and arrow containers
    model_label_buffer = 30
    model_arrows_buffer = 10
    # Create label container (right-hand column for the counter)
    label_width = 100
    labels = Container(main_window.xMax - main_window.buffer - label_width,
                       main_window.xMax - main_window.buffer,
                       main_window.y + main_window.buffer,
                       main_window.yMax - main_window.buffer,
                       10)
    # Mitochondria counter
    counter_label = Text(Point(labels.mid.getX(), labels.mid.getY() - labels.buffer), "Mitochondria")
    counter_num = Text(Point(labels.mid.getX(), labels.mid.getY() + labels.buffer), "Count: 0")
    mito_count = 0
    counter_label.draw(win)
    counter_num.draw(win)
    # Arrows container (legend strip along the bottom)
    arrows_height = 30
    arrows = Container(main_window.x + main_window.buffer,
                       main_window.xMax - main_window.buffer - labels.dx() - model_label_buffer,
                       main_window.yMax - main_window.buffer - arrows_height,
                       main_window.yMax - main_window.buffer,
                       10)
    # Anterograde arrow (shape and text)
    arrow_text_buffer = 15
    arrow_length = 50
    ante_text = Text(Point(arrows.x + arrows.dx() / 6, arrows.mid.getY()), "Anterograde")
    ante_text.setFill("blue")
    ante_text.draw(win)
    ante_arrow = Line(Point(ante_text.getAnchor().getX() - arrow_length / 2,
                            ante_text.getAnchor().getY() + arrow_text_buffer),
                      Point(ante_text.getAnchor().getX() + arrow_length / 2,
                            ante_text.getAnchor().getY() + arrow_text_buffer))
    ante_arrow.setArrow("last")
    ante_arrow.setWidth(4)
    ante_arrow.setFill("blue")
    ante_arrow.draw(win)
    # Retrograde arrow (shape and text) -- points the opposite way
    retro_text = Text(Point(arrows.xMax - arrows.dx() / 6, arrows.mid.getY()), "Retrograde")
    retro_text.setFill("red")
    retro_text.draw(win)
    retro_arrow = Line(Point(retro_text.getAnchor().getX() + arrow_length / 2,
                             retro_text.getAnchor().getY() + arrow_text_buffer),
                       Point(retro_text.getAnchor().getX() - arrow_length / 2,
                             retro_text.getAnchor().getY() + arrow_text_buffer))
    retro_arrow.setArrow("last")
    retro_arrow.setWidth(4)
    retro_arrow.setFill("red")
    retro_arrow.draw(win)
    # Create model (drawing area left of the labels, above the arrows)
    model = Container(main_window.x + main_window.buffer,
                      main_window.xMax - main_window.buffer - labels.dx() - model_label_buffer,
                      main_window.y + main_window.buffer,
                      main_window.yMax - arrows.dy() - main_window.buffer - model_arrows_buffer,
                      0)
    # Arbitrary height of both halves of neuron body
    neuron_height = model.dy() / 6
    # Draw upper part of neuron
    top_neuron = NeuronPoly(model, model.y + neuron_height, 50, win)
    top_neuron.points.append(Point(model.xMax - model.buffer, model.y))
    top_neuron.points.append(Point(model.x + model.buffer, model.y))
    top_neuron.draw()
    # Draw bottom part of neuron
    bottom_neuron = NeuronPoly(model, model.yMax - model.buffer - neuron_height, 50, win)
    bottom_neuron.points.append(Point(model.xMax - model.buffer, model.yMax))
    bottom_neuron.points.append(Point(model.x + model.buffer, model.yMax))
    bottom_neuron.draw()
    # Set up area for mito to be placed
    # Only allow mito to be within neuron edges
    Mito.container = Container(model.x + model.buffer,
                               model.xMax - model.buffer,
                               top_neuron.avgHeight + top_neuron.maxHeightDev,
                               bottom_neuron.avgHeight - bottom_neuron.maxHeightDev,
                               Mito.mitoHeight)
    Mito.defaultDx = model.dx() / 10000
    # Create mitochondria objects
    Mito.create(15, win)
    Mito.showCollisions = False
    # Run until mouse is clicked
    try:
        while not win.checkMouse():
            Mito.checkCollisions()
            for m in Mito.mitos:
                # Auto draw a mito if none are currently on screen
                if mito_count == 0:
                    if m.randDraw(1):
                        mito_count += 1
                        counter_num.setText("Count: {0}".format(mito_count))
                # Randomly choose whether to draw new mito
                elif not m.drawn:
                    if m.randDraw(10000):
                        mito_count += 1
                        counter_num.setText("Count: {0}".format(mito_count))
                # Move mito if drawn
                else:
                    m.move()
                # Reset mito if it's crossed the neuron body
                # NOTE(review): removing from Mito.mitos while iterating it
                # skips the following element; consider iterating a copy.
                if m.checkEnd():
                    Mito.mitos.remove(m)
                    Mito.mitos.append(Mito(win))
                    mito_count -= 1
                    counter_num.setText("Count: {0}".format(mito_count))
            # Limit the window refresh so that adding more mito won't slow the simulation down
            update(1000)
        # Pause for click in window
        win.getMouse()
        win.close()
    except GraphicsError as err:
        # closing the window mid-checkMouse raises; any other error is reported
        if "{0}".format(err) != "checkMouse in closed window":
            print("GraphicsError: {0}".format(err))
# Run
if __name__ == "__main__":
    # NOTE(review): main() already calls random.seed(), so this seed is redundant.
    random.seed()
    main()
|
heztet/bio110
|
main.py
|
Python
|
gpl-3.0
| 5,672
|
[
"NEURON"
] |
897c246b2480d2c692637976afb813bbe6e2bec00825d4f2a271f14ab9bc99e0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
import hashlib
import hmac
import json
import time
import urllib2
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core import mail
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.utils import timezone
from django.template import loader, Context
from mock import patch
from smartmin.tests import SmartminTest
from temba.contacts.models import Contact, ContactGroup, ContactURN, TEL_SCHEME, TWITTER_SCHEME
from temba.msgs.models import Msg, Broadcast, Call
from temba.channels.models import Channel, SyncEvent, Alert, ALERT_DISCONNECTED, ALERT_SMS, TWILIO, ANDROID, TWITTER
from temba.orgs.models import Org
from temba.tests import TembaTest, MockResponse
from temba.orgs.models import FREE_PLAN
from temba.utils import dict_to_struct
from .tasks import check_channels_task
class ChannelTest(TembaTest):
    def setUp(self):
        """Replace the default test channel with three channels of known types."""
        TembaTest.setUp(self)
        # drop the channel TembaTest created so the fixtures below are the only ones
        self.channel.delete()
        # Android ("A") channel with send+receive roles and a GCM registration
        self.tel_channel = Channel.objects.create(name="Test Channel", org=self.org, country='RW',
                                                  channel_type="A", address="+250785551212", role="SR",
                                                  secret="12345", gcm_id="123",
                                                  created_by=self.user, modified_by=self.user)
        # Twitter ("TT") channel addressed by handle rather than phone number
        self.twitter_channel = Channel.objects.create(name="Twitter Channel", org=self.org,
                                                      channel_type="TT", address="billy_bob", role="SR",
                                                      secret="78901",
                                                      created_by=self.user, modified_by=self.user)
        # Nexmo ("NX") channel with no org -- represents a released channel
        self.released_channel = Channel.objects.create(name="Released Channel",
                                                       channel_type="NX",
                                                       created_by=self.user, modified_by=self.user,
                                                       secret=None, gcm_id="000")
def assertHasCommand(self, cmd_name, response):
self.assertEquals(200, response.status_code)
data = json.loads(response.content)
for cmd in data['cmds']:
if cmd['cmd'] == cmd_name:
return
raise Exception("Did not find '%s' cmd in response: '%s'" % (cmd_name, response.content))
    def test_message_context(self):
        """Each channel type exposes its identity via build_message_context()."""
        # tel channels format their number for display and expose an E164 form
        context = self.tel_channel.build_message_context()
        self.assertEqual(context['__default__'], '+250 785 551 212')
        self.assertEqual(context['name'], 'Test Channel')
        self.assertEqual(context['address'], '+250 785 551 212')
        self.assertEqual(context['tel'], '+250 785 551 212')
        self.assertEqual(context['tel_e164'], '+250785551212')
        # Twitter channels use the handle as address; tel fields are empty
        context = self.twitter_channel.build_message_context()
        self.assertEqual(context['__default__'], 'billy_bob')
        self.assertEqual(context['name'], 'Twitter Channel')
        self.assertEqual(context['address'], 'billy_bob')
        self.assertEqual(context['tel'], '')
        self.assertEqual(context['tel_e164'], '')
        # released channels fall back to the channel name with no address
        context = self.released_channel.build_message_context()
        self.assertEqual(context['__default__'], 'Released Channel')
        self.assertEqual(context['name'], 'Released Channel')
        self.assertEqual(context['address'], '')
        self.assertEqual(context['tel'], '')
        self.assertEqual(context['tel_e164'], '')
    def test_delegate_channels(self):
        """Adding a delegate caller channel enables IVR support on the org."""
        # we don't support IVR yet
        self.assertFalse(self.org.supports_ivr())
        # add a delegate caller
        Channel.add_call_channel(self.org, self.user, self.tel_channel)
        # now we should be IVR capable
        self.assertTrue(self.org.supports_ivr())
    def test_schemes(self):
        """Channels report the URN scheme they can send to."""
        self.assertEquals(self.tel_channel.get_scheme(), TEL_SCHEME)
        self.assertEquals(self.twitter_channel.get_scheme(), TWITTER_SCHEME)
        # channels default to the tel scheme
        self.assertEquals(self.released_channel.get_scheme(), TEL_SCHEME)
        # each scheme maps back to the channel types that can serve it
        self.assertIn('A', Channel.types_for_scheme(TEL_SCHEME))
        self.assertIn('TT', Channel.types_for_scheme(TWITTER_SCHEME))
    def test_get_channel_type_name(self):
        """Channel type codes map to human-readable names."""
        self.assertEquals(self.tel_channel.get_channel_type_name(), "Android Phone")
        self.assertEquals(self.twitter_channel.get_channel_type_name(), "Twitter Channel")
        self.assertEquals(self.released_channel.get_channel_type_name(), "Nexmo Channel")
    def test_channel_selection(self):
        """Outgoing messages pick the channel whose number prefix matches the
        contact (falling back to the newest channel), and inbound traffic
        moves a contact's channel affinity."""
        # make our default tel channel MTN
        mtn = self.tel_channel
        mtn.name = "MTN"
        mtn.save()
        # create a channel for Tigo too
        tigo = Channel.objects.create(name="Tigo", org=self.org, country='RW',
                                      channel_type='T', address="+250725551212",
                                      created_by=self.user, modified_by=self.user, secret="11111", gcm_id="456")
        # new contact on MTN should send with the MTN channel
        sms = self.send_message(['+250788382382'], "Sent to an MTN number")
        self.assertEquals(mtn, self.org.get_send_channel(contact_urn=sms.contact_urn))
        self.assertEquals(mtn, sms.channel)
        # new contact on Tigo should send with the Tigo channel
        sms = self.send_message(['+250728382382'], "Sent to a Tigo number")
        self.assertEquals(tigo, self.org.get_send_channel(contact_urn=sms.contact_urn))
        self.assertEquals(tigo, sms.channel)
        # now our MTN contact texts, the tigo number which should change their affinity
        sms = Msg.create_incoming(tigo, (TEL_SCHEME, "+250788382382"), "Send an inbound message to Tigo")
        self.assertEquals(tigo, sms.channel)
        self.assertEquals(tigo, self.org.get_send_channel(contact_urn=sms.contact_urn))
        self.assertEquals(tigo, ContactURN.objects.get(path='+250788382382').channel)
        # new contact on Airtel (some overlap) should send with the Tigo channel since it is newest
        sms = self.send_message(['+250738382382'], "Sent to a Airtel number")
        self.assertEquals(tigo, self.org.get_send_channel(contact_urn=sms.contact_urn))
        self.assertEquals(tigo, sms.channel)
        # clear the affinity for our channel
        ContactURN.objects.filter(path='+250788382382').update(channel=None)
        # change channel numbers to be shortcodes, i.e. no overlap with contact numbers
        mtn.address = '1234'
        mtn.save()
        tigo.address = '1235'
        tigo.save()
        # should return the newest channel which is TIGO
        sms = self.send_message(['+250788382382'], "Sent to an MTN number, but with shortcode channels")
        self.assertEquals(tigo, sms.channel)
        self.assertEquals(tigo, self.org.get_send_channel(contact_urn=sms.contact_urn))
        # check for twitter
        self.assertEquals(self.twitter_channel, self.org.get_send_channel(scheme=TWITTER_SCHEME))
        contact = self.create_contact("Billy", number="+250722222222", twitter="billy_bob")
        twitter_urn = contact.get_urn(schemes=[TWITTER_SCHEME])
        self.assertEquals(self.twitter_channel, self.org.get_send_channel(contact_urn=twitter_urn))
        # calling without scheme or urn should raise exception
        self.assertRaises(ValueError, self.org.get_send_channel)
    def test_message_splitting(self):
        """Long outgoing messages are split according to the channel type's size limit."""
        # external API requires messages to be <= 160 chars
        self.tel_channel.channel_type = 'EX'
        self.tel_channel.save()
        msg = Msg.create_outgoing(self.org, self.user, (TEL_SCHEME, '+250738382382'), 'x' * 400) # 400 chars long
        Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
        # 400 chars at <=160 per part means three message parts
        self.assertEqual(3, Msg.objects.get(pk=msg.id).msg_count)
        # Nexmo limit is 1600
        self.tel_channel.channel_type = 'NX'
        self.tel_channel.save()
        cache.clear() # clear the channel from cache
        msg = Msg.create_outgoing(self.org, self.user, (TEL_SCHEME, '+250738382382'), 'y' * 400)
        Channel.send_message(dict_to_struct('MsgStruct', msg.as_task_json()))
        self.assertEqual(self.tel_channel, Msg.objects.get(pk=msg.id).channel)
        # 400 chars fits in a single Nexmo message
        self.assertEqual(1, Msg.objects.get(pk=msg.id).msg_count)
    def test_ensure_normalization(self):
        """ensure_normalized_contacts() rewrites tel URNs to E164 using the channel country."""
        self.tel_channel.country = 'RW'
        self.tel_channel.save()
        contact1 = self.create_contact("contact1", "0788111222")
        contact2 = self.create_contact("contact2", "+250788333444")
        contact3 = self.create_contact("contact3", "+18006927753")
        self.tel_channel.ensure_normalized_contacts()
        norm_c1 = Contact.objects.get(pk=contact1.pk)
        norm_c2 = Contact.objects.get(pk=contact2.pk)
        norm_c3 = Contact.objects.get(pk=contact3.pk)
        # local number gains the +250 country code; already-E164 numbers are untouched
        self.assertEquals(norm_c1.get_urn(TEL_SCHEME).path, "+250788111222")
        self.assertEquals(norm_c2.get_urn(TEL_SCHEME).path, "+250788333444")
        self.assertEquals(norm_c3.get_urn(TEL_SCHEME).path, "+18006927753")
    def test_delete(self):
        """Deleting a channel keeps its messages/calls/broadcasts on the org,
        fails its queued messages, scrubs its credentials, and tells the
        device to release itself on the next sync."""
        self.org.administrators.add(self.user)
        self.user.set_org(self.org)
        self.login(self.user)
        # a message, a call, and a broadcast
        msg = self.send_message(['250788382382'], "How is it going?")
        call = Call.create_call(self.tel_channel, "250788383385", timezone.now(), 5, 'mo', self.user)
        self.assertEqual(self.org, msg.org)
        self.assertEqual(self.tel_channel, msg.channel)
        self.assertEquals(1, Msg.get_messages(self.org).count())
        self.assertEquals(1, Call.get_calls(self.org).count())
        self.assertEquals(1, Broadcast.get_broadcasts(self.org).count())
        # start off in the pending state
        self.assertEquals('P', msg.status)
        response = self.fetch_protected(reverse('channels.channel_delete', args=[self.tel_channel.pk]), self.user)
        self.assertContains(response, 'Test Channel')
        response = self.fetch_protected(reverse('channels.channel_delete', args=[self.tel_channel.pk]),
                                        post_data=dict(remove=True), user=self.user)
        self.assertRedirect(response, reverse("orgs.org_home"))
        # the message keeps its channel, but credentials are scrubbed
        msg = Msg.objects.get(pk=msg.pk)
        self.assertIsNotNone(msg.channel)
        self.assertIsNone(msg.channel.gcm_id)
        self.assertIsNone(msg.channel.secret)
        self.assertEquals(self.org, msg.org)
        # queued messages for the channel should get marked as failed
        self.assertEquals('F', msg.status)
        call = Call.objects.get(pk=call.pk)
        self.assertIsNotNone(call.channel)
        self.assertIsNone(call.channel.gcm_id)
        self.assertIsNone(call.channel.secret)
        self.assertEquals(self.org, call.org)
        broadcast = Broadcast.objects.get(pk=msg.broadcast.pk)
        self.assertEquals(self.org, broadcast.org)
        # should still be considered that user's message, call and broadcast
        self.assertEquals(1, Msg.get_messages(self.org).count())
        self.assertEquals(1, Call.get_calls(self.org).count())
        self.assertEquals(1, Broadcast.get_broadcasts(self.org).count())
        # syncing this channel should result in a release
        post_data = dict(cmds=[dict(cmd="status", p_sts="CHA", p_src="BAT", p_lvl="60", net="UMTS", pending=[], retry=[])])
        # now send the channel's updates
        response = self.sync(self.tel_channel, post_data)
        # our response should contain a release
        self.assertHasCommand('rel', response)
        # create a channel
        channel = Channel.objects.create(name="Test Channel", address="0785551212", country='RW',
                                         org=self.org, created_by=self.user, modified_by=self.user,
                                         secret="12345", gcm_id="123")
        # superusers can also delete channels
        response = self.fetch_protected(reverse('channels.channel_delete', args=[channel.pk]), self.superuser)
        self.assertContains(response, 'Test Channel')
        response = self.fetch_protected(reverse('channels.channel_delete', args=[channel.pk]),
                                        post_data=dict(remove=True), user=self.superuser)
        self.assertRedirect(response, reverse("orgs.org_home"))
def test_list(self):
# visit the channel's list as a manager but not belonging to this organization
self.login(self.user)
response = self.client.get(reverse('channels.channel_list'), follow=True)
self.assertEquals(200, response.status_code)
# redirected to login page since the user does not have an org
self.assertEquals(response.request['PATH_INFO'], reverse('users.user_login'))
# add to this user an org
org = Org.objects.create(name="otherOrg", timezone="Africa/Kigali", created_by=self.user, modified_by=self.user)
org.administrators.add(self.user)
self.user.set_org(org)
response = self.client.get(reverse('channels.channel_list'), follow=True)
self.assertEquals(200, response.status_code)
self.assertContains(response, "Add a Channel")
# remove user form the other org
org.administrators.remove(self.user)
# visit the channel's list as a manager within the channel's organization
self.org.administrators.add(self.user)
self.user.set_org(self.org)
self.login(self.user)
# release twitter channel so that org has just one channel
self.twitter_channel.org = None
self.twitter_channel.is_active = False
self.twitter_channel.save()
channel = self.org.get_receive_channel(TEL_SCHEME)
response = self.client.get(reverse('channels.channel_list'), follow=True)
self.assertEquals(response.request['PATH_INFO'], reverse('channels.channel_read', args=[channel.id]))
self.assertContains(response, 'Test Channel')
# add another channel to this organization
second_channel = Channel.objects.create(name="Second Channel", address="0755551212", org=self.org,
created_by=self.user, modified_by=self.user, secret="67890", gcm_id="456")
# now we go to the list page instead of read page for the sole channel
response = self.client.get(reverse('channels.channel_list'), follow=True)
self.assertEquals(response.request['PATH_INFO'], reverse('channels.channel_list'))
self.assertEquals(response.context['object_list'].count(), 2)
self.assertContains(response, "Test Channel")
# clear out the phone and name for one of our channels
second_channel.name = None
second_channel.address = None
second_channel.save()
response = self.client.get(reverse('channels.channel_list'), follow=True)
self.assertContains(response, "Unknown")
self.assertContains(response, "Android Phone")
self.client.logout()
# visit the channel's list as administrator
self.org.administrators.add(self.user)
self.user.set_org(self.user)
response = self.fetch_protected(reverse('channels.channel_list'), self.user)
self.assertContains(response, 'Test Channel')
# visit ther channel's list as a superuser
response = self.fetch_protected(reverse('channels.channel_list'), self.superuser)
self.assertContains(response, 'Test Channel')
def test_channel_status(self):
# visit the main page as a user
self.login(self.user)
response = self.client.get('/', follow=True)
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
self.assertNotIn('delayed_syncevents', response.context, msg="Found delayed_syncevents in context")
self.client.logout()
# visit the main page as superuser
self.login(self.superuser)
response = self.client.get('/', follow=True)
# superusers doesn't have orgs thus cannot have both values
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
self.assertNotIn('delayed_syncevents', response.context, msg="Found delayed_syncevents in context")
self.client.logout()
# add the user to an org
self.org.administrators.add(self.user)
self.user.set_org(self.org)
# visit the main page again as a user
self.login(self.user)
response = self.client.get('/', follow=True)
# there is not unsent nor delayed syncevents
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
self.assertNotIn('delayed_syncevents', response.context, msg="Found delayed_syncevents in context")
# replace existing channels with a single Android device
Channel.objects.update(is_active=False)
channel = Channel.objects.create(org=self.org, channel_type=ANDROID,
address="+250781112222", gcm_id="asdf", secret="asdf",
created_by=self.user, modified_by=self.user)
channel.created_on = timezone.now() - timedelta(hours=2)
channel.save()
response = self.client.get('/', Follow=True)
self.assertNotIn('delayed_syncevents', response.context)
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
# simulate a sync in back in two hours
post_data = dict(cmds=[
# device details status
dict(cmd="status", p_sts="CHA", p_src="BAT", p_lvl="60",
net="UMTS", pending=[], retry=[])])
self.sync(channel, post_data)
sync_event = SyncEvent.objects.all()[0]
sync_event.created_on = timezone.now() - timedelta(hours=2)
sync_event.save()
response = self.client.get('/', Follow=True)
self.assertIn('delayed_syncevents', response.context)
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
# add a message, just sent so shouldn't have delayed
msg = Msg.create_outgoing(self.org, self.user, (TEL_SCHEME, '250788123123'), "test")
response = self.client.get('/', Follow=True)
self.assertIn('delayed_syncevents', response.context)
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
# but put it in the past
msg.created_on = timezone.now() - timedelta(hours=3)
msg.save()
response = self.client.get('/', Follow=True)
self.assertIn('delayed_syncevents', response.context)
self.assertIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
# if there is a successfully sent message after sms was created we do not consider it as delayed
success_msg = Msg.create_outgoing(self.org, self.user, (TEL_SCHEME, '+250788123123'), "success-send")
success_msg.created_on = timezone.now() - timedelta(hours=2)
success_msg.sent_on = timezone.now() - timedelta(hours=2)
success_msg.status = 'S'
success_msg.save()
response = self.client.get('/', Follow=True)
self.assertIn('delayed_syncevents', response.context)
self.assertNotIn('unsent_msgs', response.context, msg="Found unsent_msgs in context")
# test that editors have the channel of the the org the are using
other_user = self.create_user("Other")
self.create_secondary_org()
self.org2.administrators.add(other_user)
self.org.editors.add(other_user)
self.assertFalse(self.org2.channels.all())
self.login(other_user)
other_user.set_org(self.org2)
self.assertEquals(self.org2, other_user.get_org())
response = self.client.get('/', follow=True)
self.assertNotIn('channel_type', response.context, msg="Found channel_type in context")
other_user.set_org(self.org)
self.assertEquals(1, self.org.channels.filter(is_active=True).count())
self.assertEquals(self.org, other_user.get_org())
response = self.client.get('/', follow=True)
#self.assertIn('channel_type', response.context)
    def sync(self, channel, post_data=None, signature=None):
        """POST a signed sync payload to the channel's sync endpoint.

        If no signature is supplied, one is computed the same way the Android
        client does: HMAC-SHA256 over the JSON body, keyed by the channel
        secret concatenated with the timestamp, then base64/url-encoded.
        """
        if not post_data:
            post_data = "{}"
        else:
            post_data = json.dumps(post_data)
        ts = int(time.time())
        if not signature:
            # sign the request
            key = str(channel.secret) + str(ts)
            signature = hmac.new(key=key, msg=bytes(post_data), digestmod=hashlib.sha256).digest()
            # base64 and url sanitize
            signature = urllib2.quote(base64.urlsafe_b64encode(signature))
        return self.client.post("%s?signature=%s&ts=%d" % (reverse('sync', args=[channel.pk]), signature, ts),
                                content_type='application/json', data=post_data)
def test_update(self):
    """Exercise the channel update view: access control, editing name/address as a
    manager, admin and superuser, and type-specific form fields (Twilio hides the
    address field; Twitter exposes config fields such as auto_follow)."""
    update_url = reverse('channels.channel_update', args=[self.tel_channel.id])
    # only user of the org can view the update page of a channel
    self.client.logout()
    self.login(self.user)
    response = self.client.get(update_url)
    self.assertEquals(302, response.status_code)
    self.login(self.user)
    # visit the channel's update page as a manager within the channel's organization
    self.org.administrators.add(self.user)
    response = self.fetch_protected(update_url, self.user)
    self.assertEquals(200, response.status_code)
    self.assertEquals(response.request['PATH_INFO'], update_url)
    channel = Channel.objects.get(pk=self.tel_channel.id)
    self.assertEquals(channel.name, "Test Channel")
    self.assertEquals(channel.address, "+250785551212")
    postdata = dict()
    postdata['name'] = "Test Channel Update1"
    postdata['address'] = "+250785551313"
    self.login(self.user)
    response = self.client.post(update_url, postdata, follow=True)
    # the POST should have updated both editable fields
    channel = Channel.objects.get(pk=self.tel_channel.id)
    self.assertEquals(channel.name, "Test Channel Update1")
    self.assertEquals(channel.address, "+250785551313")
    # if we change the channel to a twilio type, shouldn't be able to edit our address
    channel.channel_type = TWILIO
    channel.save()
    response = self.client.get(update_url)
    self.assertFalse('address' in response.context['form'].fields)
    # bring it back to android
    channel.channel_type = ANDROID
    channel.save()
    # visit the channel's update page as administrator
    self.org.administrators.add(self.user)
    self.user.set_org(self.org)
    response = self.fetch_protected(update_url, self.user)
    self.assertEquals(200, response.status_code)
    self.assertEquals(response.request['PATH_INFO'], update_url)
    channel = Channel.objects.get(pk=self.tel_channel.id)
    self.assertEquals(channel.name, "Test Channel Update1")
    self.assertEquals(channel.address, "+250785551313")
    postdata = dict()
    postdata['name'] = "Test Channel Update2"
    postdata['address'] = "+250785551414"
    response = self.fetch_protected(update_url, self.user, postdata)
    channel = Channel.objects.get(pk=self.tel_channel.id)
    self.assertEquals(channel.name, "Test Channel Update2")
    self.assertEquals(channel.address, "+250785551414")
    # visit the channel's update page as superuser
    self.superuser.set_org(self.org)
    response = self.fetch_protected(update_url, self.superuser)
    self.assertEquals(200, response.status_code)
    self.assertEquals(response.request['PATH_INFO'], update_url)
    channel = Channel.objects.get(pk=self.tel_channel.id)
    self.assertEquals(channel.name, "Test Channel Update2")
    self.assertEquals(channel.address, "+250785551414")
    postdata = dict()
    postdata['name'] = "Test Channel Update3"
    postdata['address'] = "+250785551515"
    response = self.fetch_protected(update_url, self.superuser, postdata)
    channel = Channel.objects.get(pk=self.tel_channel.id)
    self.assertEquals(channel.name, "Test Channel Update3")
    self.assertEquals(channel.address, "+250785551515")
    # make sure channel works with alphanumeric numbers
    channel.address = "EATRIGHT"
    self.assertEquals("EATRIGHT", channel.get_address_display())
    self.assertEquals("EATRIGHT", channel.get_address_display(e164=True))
    # change channel type to Twitter
    channel.channel_type = TWITTER
    channel.address = 'billy_bob'
    channel.config = json.dumps({'handle_id': 12345, 'auto_follow': True, 'oauth_token': 'abcdef', 'oauth_token_secret': '23456'})
    channel.save()
    response = self.fetch_protected(update_url, self.user)
    self.assertEquals(200, response.status_code)
    # Twitter channels expose extra fields on the update form, but not country
    self.assertIn('name', response.context['fields'])
    self.assertIn('alert_email', response.context['fields'])
    self.assertIn('auto_follow', response.context['fields'])
    self.assertIn('address', response.context['fields'])
    self.assertNotIn('country', response.context['fields'])
    postdata = dict()
    postdata['name'] = "Twitter2"
    postdata['alert_email'] = "bob@example.com"
    postdata['auto_follow'] = False
    postdata['address'] = "billy_bob"
    self.fetch_protected(update_url, self.user, postdata)
    channel = Channel.objects.get(pk=self.tel_channel.id)
    self.assertEquals(channel.name, "Twitter2")
    self.assertEquals(channel.alert_email, "bob@example.com")
    self.assertEquals(channel.address, "billy_bob")
    # auto_follow is stored inside the channel's JSON config, not as a column
    self.assertFalse(json.loads(channel.config)['auto_follow'])
def test_read(self):
    """Channel read page: power/network stats derived from sync events, delayed-sync
    and unsent-message context variables, and the message chart — which must count
    messages from normal contacts but exclude test contacts.

    Fix: the BAT source count was previously asserted via source_stats[0][1] twice
    (copy-paste); it now checks source_stats[1][1], mirroring the network_stats checks.
    """
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="CHA", p_src="BAT", p_lvl="60",
             net="UMTS", pending=[], retry=[])])
    # now send the channel's updates
    self.sync(self.tel_channel, post_data)
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="FUL", p_src="AC", p_lvl="100",
             net="WIFI", pending=[], retry=[])])
    # now send the channel's updates
    self.sync(self.tel_channel, post_data)
    self.assertEquals(2, SyncEvent.objects.all().count())
    # only user of the org can view the detail page of a channel
    self.client.logout()
    self.login(self.user)
    response = self.client.get(reverse('channels.channel_read', args=[self.tel_channel.id]))
    self.assertEquals(302, response.status_code)
    self.login(self.user)
    # visit the channel's read page as a manager within the channel's organization
    self.org.administrators.add(self.user)
    self.user.set_org(self.org)
    response = self.fetch_protected(reverse('channels.channel_read', args=[self.tel_channel.id]), self.user)
    # one entry per distinct power source, each with a count of 1
    self.assertEquals(len(response.context['source_stats']), len(SyncEvent.objects.values_list('power_source', flat=True).distinct()))
    self.assertEquals('AC', response.context['source_stats'][0][0])
    self.assertEquals(1, response.context['source_stats'][0][1])
    self.assertEquals('BAT', response.context['source_stats'][1][0])
    self.assertEquals(1, response.context['source_stats'][1][1])
    # likewise one entry per distinct network type
    self.assertEquals(len(response.context['network_stats']), len(SyncEvent.objects.values_list('network_type', flat=True).distinct()))
    self.assertEquals('UMTS', response.context['network_stats'][0][0])
    self.assertEquals(1, response.context['network_stats'][0][1])
    self.assertEquals('WIFI', response.context['network_stats'][1][0])
    self.assertEquals(1, response.context['network_stats'][1][1])
    self.assertTrue(len(response.context['latest_sync_events']) <= 5)
    two_hours_ago = timezone.now() - timedelta(hours=2)
    # make sure our channel is old enough to trigger alerts
    self.tel_channel.created_on = two_hours_ago
    self.tel_channel.save()
    # backdate all sync events to trigger the delayed sync status
    for sync in SyncEvent.objects.all():
        sync.created_on = two_hours_ago
        sync.save()
    # add a message, backdated so it counts as unsent
    msg = Msg.create_outgoing(self.org, self.user, (TEL_SCHEME, '250785551212'), 'delayed message')
    msg.created_on = two_hours_ago
    msg.save()
    response = self.fetch_protected(reverse('channels.channel_read', args=[self.tel_channel.id]), self.user)
    self.assertIn('delayed_sync_event', response.context_data.keys())
    self.assertIn('unsent_msgs_count', response.context_data.keys())
    # with superuser
    response = self.fetch_protected(reverse('channels.channel_read', args=[self.tel_channel.id]), self.superuser)
    self.assertEquals(200, response.status_code)
    # now that we can access the channel, which messages do we display in the chart?
    joe = self.create_contact('Joe', '+2501234567890')
    test_contact = self.create_contact('Testing', '+123456789012')
    test_contact.is_test = True
    test_contact.save()
    # snapshot current chart counts so we can assert relative changes below
    r_incomings = response.context['message_stats'][0]['data'][-1]['count']
    r_outgoings = response.context['message_stats'][1]['data'][-1]['count']
    # send messages with a test contact: the chart counts must NOT change
    Msg.create_incoming(self.tel_channel, (TEL_SCHEME, test_contact.get_urn().path), 'This incoming message will not be counted')
    Msg.create_outgoing(self.org, self.user, test_contact, 'This outgoing message will not be counted')
    response = self.fetch_protected(reverse('channels.channel_read', args=[self.tel_channel.id]), self.superuser)
    self.assertEquals(200, response.status_code)
    self.assertEquals(response.context['message_stats'][0]['data'][-1]['count'], r_incomings)
    self.assertEquals(response.context['message_stats'][1]['data'][-1]['count'], r_outgoings)
    # send messages with a normal contact: the chart counts must increase by one each
    Msg.create_incoming(self.tel_channel, (TEL_SCHEME, joe.get_urn(TEL_SCHEME).path), 'This incoming message will be counted')
    Msg.create_outgoing(self.org, self.user, joe, 'This outgoing message will be counted')
    response = self.fetch_protected(reverse('channels.channel_read', args=[self.tel_channel.id]), self.superuser)
    self.assertEquals(200, response.status_code)
    self.assertEquals(response.context['message_stats'][0]['data'][-1]['count'], r_incomings + 1)
    self.assertEquals(response.context['message_stats'][1]['data'][-1]['count'], r_outgoings + 1)
def test_invalid(self):
    """Invalid sync requests: GETs are rejected with a 500, unknown channels get a
    'rel' command back, and a stale timestamp fails signature validation with a 401."""
    # Must be POST
    response = self.client.get("%s?signature=sig&ts=123" % (reverse('sync', args=[100])), content_type='application/json')
    self.assertEquals(500, response.status_code)
    # Unknown channel
    response = self.client.post("%s?signature=sig&ts=123" % (reverse('sync', args=[999])), content_type='application/json')
    self.assertEquals(200, response.status_code)
    self.assertEquals('rel', json.loads(response.content)['cmds'][0]['cmd'])
    # too old: a timestamp 16 minutes in the past is rejected
    ts = int(time.time()) - 60*16
    response = self.client.post("%s?signature=sig&ts=%d" % (reverse('sync', args=[self.tel_channel.pk]), ts), content_type='application/json')
    self.assertEquals(401, response.status_code)
    self.assertEquals(3, json.loads(response.content)['error_id'])
def test_register_and_claim(self):
    """Device registration and claiming: register via the GCM payload, claim with a
    phone number, re-register/re-claim the same device, add a Nexmo bulk sender,
    and enforce that all claimed numbers share one country."""
    self.org.administrators.add(self.user)
    self.user.set_org(self.org)
    post_data = json.dumps(dict(cmds=[dict(cmd="gcm", gcm_id="claim_test", uuid='uuid'), dict(cmd='status', cc='RW', dev='Nexus')]))
    # must be a post
    response = self.client.get(reverse('register'), content_type='application/json')
    self.assertEquals(500, response.status_code)
    # try a legit register
    response = self.client.post(reverse('register'), content_type='application/json', data=post_data)
    self.assertEquals(200, response.status_code)
    channel_object = Channel.objects.get(gcm_id="claim_test")
    self.assertEquals('RW', channel_object.country)
    self.assertEquals('Nexus', channel_object.device)
    channel = json.loads(response.content)['cmds'][0]
    self.assertEquals(channel['relayer_id'], channel_object.pk)
    # registering again with the same payload returns the same relayer
    response = self.client.post(reverse('register'), content_type='application/json', data=post_data)
    self.assertEquals(200, response.status_code)
    channel = json.loads(response.content)['cmds'][0]
    self.assertEquals(channel['relayer_id'], channel_object.pk)
    # try to claim with an invalid phone number
    response = self.fetch_protected(reverse('channels.channel_claim_android'), self.user,
                                    post_data=dict(claim_code=channel['relayer_claim_code'],
                                                   phone_number="078123"),
                                    failOnFormValidation=False)
    self.assertEquals(200, response.status_code)
    self.assertContains(response, "Invalid phone number")
    # claim our channel
    response = self.fetch_protected(reverse('channels.channel_claim_android'), self.user,
                                    post_data=dict(claim_code=channel['relayer_claim_code'],
                                                   phone_number="0788123123"))
    # alert email should default to the currently logged in user
    new_channel = Channel.objects.get(org=self.org, address='+250788123123')
    self.assertEquals(self.user.email, new_channel.alert_email)
    self.assertTrue('success' in response.get('Location', None))
    self.assertRedirect(response, reverse('public.public_welcome'))
    # try having a device register again
    response = self.client.post(reverse('register'), content_type='application/json', data=post_data)
    self.assertEquals(200, response.status_code)
    # should be two channels with that gcm id
    self.assertEquals(2, Channel.objects.filter(gcm_id='claim_test').count())
    # but only one with an org
    active = Channel.objects.filter(gcm_id='claim_test').exclude(org=None)
    self.assertEquals(1, len(active))
    active = active[0]
    self.assertEquals(channel['relayer_id'], active.pk)
    self.assertEquals('+250788123123', active.address)
    # but if we claim our new one, we'll clear out our previous one
    new_channel = Channel.objects.get(gcm_id='claim_test', org=None)
    response = self.fetch_protected(reverse('channels.channel_claim_android'), self.user,
                                    post_data=dict(claim_code=new_channel.claim_code, phone_number="+250788123124"))
    self.assertRedirect(response, reverse('public.public_welcome'))
    channel = Channel.objects.get(gcm_id='claim_test', is_active=True)
    self.assertEquals(channel.pk, new_channel.pk)
    self.assertEquals('+250788123124', channel.address)
    # try to claim a bogus channel
    response = self.fetch_protected(reverse('channels.channel_claim_android'), self.user, post_data=dict(claim_code="Your Mom"), failOnFormValidation=False)
    self.assertEquals(200, response.status_code)
    self.assertContains(response, 'Invalid claim code')
    # check our primary tel channel is the same as our outgoing
    self.assertEquals(self.org.get_receive_channel(TEL_SCHEME), self.org.get_send_channel(TEL_SCHEME))
    self.assertFalse(self.org.get_send_channel(TEL_SCHEME).is_delegate_sender())
    channel = self.org.get_send_channel(TEL_SCHEME).pk
    # now claim a bulk sender
    self.fetch_protected("%s?connection=NX&channel=%d" % (reverse('channels.channel_create_bulk_sender'), channel),
                         self.user, post_data=dict(connection='NX', channel=channel), failOnFormValidation=False)
    # shouldn't work without a Nexmo account connected
    self.assertFalse(self.org.get_send_channel(TEL_SCHEME).is_delegate_sender())
    self.assertFalse(self.org.is_connected_to_nexmo())
    # now connect to nexmo (mock out the account check so no HTTP call is made)
    with patch('temba.nexmo.NexmoClient.update_account') as connect:
        connect.return_value = True
        self.org.connect_nexmo('123', '456')
        self.org.save()
    self.assertTrue(self.org.is_connected_to_nexmo())
    # now adding our bulk sender should work
    self.fetch_protected("%s?connection=NX&channel=%d" % (reverse('channels.channel_create_bulk_sender'), channel),
                         self.user, post_data=dict(connection='NX', channel=channel))
    self.assertTrue(self.org.get_send_channel(TEL_SCHEME).is_delegate_sender())
    # now we should have a new outgoing sender
    self.assertNotEqual(self.org.get_receive_channel(TEL_SCHEME), self.org.get_send_channel(TEL_SCHEME))
    self.assertTrue(self.org.get_send_channel(TEL_SCHEME).is_delegate_sender())
    self.assertFalse(self.org.get_receive_channel(TEL_SCHEME).is_delegate_sender())
    # create a US channel and try claiming it next to our RW channels
    post_data = json.dumps(dict(cmds=[dict(cmd="gcm", gcm_id="claim_test", uuid='uuid'), dict(cmd='status', cc='US', dev='Nexus')]))
    response = self.client.post(reverse('register'), content_type='application/json', data=post_data)
    channel = json.loads(response.content)['cmds'][0]
    response = self.fetch_protected(reverse('channels.channel_claim_android'), self.user, post_data=dict(claim_code=channel['relayer_claim_code'], phone_number="0788382382"), failOnFormValidation=False)
    self.assertEquals(200, response.status_code, "Claimed channels from two different countries")
    self.assertContains(response, "you can only add numbers for the same country")
    response = self.fetch_protected(reverse('channels.channel_claim'), self.user)
    self.assertEquals(200, response.status_code)
    self.assertEquals(response.context['twilio_countries'], "Belgium, Canada, Finland, Norway, Poland, Spain, Sweden, United Kingdom or United States")
def test_claim_nexmo(self):
    """Connecting a Nexmo account (invalid then valid credentials, with the HTTP layer
    mocked) and claiming one of the account's numbers as a channel."""
    self.login(self.admin)
    # remove any existing channels
    self.org.channels.update(is_active=False, org=None)
    # make sure nexmo is on the claim page
    response = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(response, "Nexmo")
    self.assertContains(response, reverse('orgs.org_nexmo_connect'))
    # connect nexmo
    connect_url = reverse('orgs.org_nexmo_connect')
    # simulate invalid credentials
    with patch('requests.get') as nexmo:
        nexmo.return_value = MockResponse(401, '{"error-code": "401"}')
        response = self.client.post(connect_url, dict(api_key='key', api_secret='secret'))
        self.assertContains(response, "Your Nexmo API key and secret seem invalid.")
        self.assertFalse(self.org.is_connected_to_nexmo())
    # ok, now with a success
    with patch('requests.get') as nexmo_get:
        with patch('requests.post') as nexmo_post:
            # believe it or not nexmo returns 'error-code' 200 on success
            nexmo_get.return_value = MockResponse(200, '{"error-code": "200"}')
            nexmo_post.return_value = MockResponse(200, '{"error-code": "200"}')
            self.client.post(connect_url, dict(api_key='key', api_secret='secret'))
            # nexmo should now be connected; re-fetch the org to see the saved config
            self.org = Org.objects.get(pk=self.org.pk)
            self.assertTrue(self.org.is_connected_to_nexmo())
            self.assertEquals(self.org.config_json()['NEXMO_KEY'], 'key')
            self.assertEquals(self.org.config_json()['NEXMO_SECRET'], 'secret')
    # hit the claim page, should now have a claim nexmo link
    claim_nexmo = reverse('channels.channel_claim_nexmo')
    response = self.client.get(reverse('channels.channel_claim'))
    self.assertContains(response, claim_nexmo)
    # let's add a number already connected to the account
    with patch('requests.get') as nexmo_get:
        with patch('requests.post') as nexmo_post:
            nexmo_get.return_value = MockResponse(200, '{"count":1,"numbers":[{"type":"mobile-lvn","country":"US","msisdn":"13607884540"}] }')
            nexmo_post.return_value = MockResponse(200, '{"error-code": "200"}')
            # make sure our number appears on the claim page
            response = self.client.get(claim_nexmo)
            self.assertContains(response, '360-788-4540')
            # claim it
            response = self.client.post(claim_nexmo, dict(country='US', phone_number='13607884540'))
            self.assertRedirects(response, reverse('public.public_welcome') + "?success")
            # make sure it is actually connected
            Channel.objects.get(channel_type='NX', org=self.org)
def test_claim_twitter(self):
    """Claiming a Twitter channel via the OAuth dance (Twython mocked), including
    re-claiming the same handle with refreshed credentials."""
    # add to this user an org
    org = Org.objects.create(name="otherOrg", timezone="Africa/Kigali", created_by=self.user, modified_by=self.user)
    org.administrators.add(self.user)
    self.user.set_org(org)
    self.user.groups.add(Group.objects.get(name="Beta"))  # enable beta features
    self.login(self.user)
    claim_url = reverse('channels.channel_claim_twitter')
    # step 1: fetching the claim page stores the OAuth request tokens in the session
    with patch('twython.Twython.get_authentication_tokens') as get_authentication_tokens:
        get_authentication_tokens.return_value = dict(oauth_token='abcde',
                                                      oauth_token_secret='12345',
                                                      auth_url='http://example.com/auth')
        response = self.client.get(claim_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['twitter_auth_url'], 'http://example.com/auth')
        self.assertEqual(self.client.session['twitter_oauth_token'], 'abcde')
        self.assertEqual(self.client.session['twitter_oauth_token_secret'], '12345')
    # step 2: returning with an oauth_verifier exchanges the tokens and creates the channel
    with patch('temba.utils.mage.MageClient.add_twitter_stream') as add_twitter_stream:
        add_twitter_stream.return_value = dict()
        with patch('twython.Twython.get_authorized_tokens') as get_authorized_tokens:
            get_authorized_tokens.return_value = dict(screen_name='billy_bob',
                                                      user_id=123,
                                                      oauth_token='bcdef',
                                                      oauth_token_secret='23456')
            response = self.client.get(claim_url, {'oauth_verifier': 'vwxyz'}, follow=True)
            # the session tokens are consumed by the claim
            self.assertNotIn('twitter_oauth_token', self.client.session)
            self.assertNotIn('twitter_oauth_token_secret', self.client.session)
            self.assertEqual(response.status_code, 200)
            channel = response.context['object']
            self.assertEqual(channel.address, 'billy_bob')
            config = json.loads(channel.config)
            self.assertEqual(config['handle_id'], 123)
            self.assertEqual(config['oauth_token'], 'bcdef')
            self.assertEqual(config['oauth_token_secret'], '23456')
        # re-add same account but with different auth credentials
        s = self.client.session
        s['twitter_oauth_token'] = 'cdefg'
        s['twitter_oauth_token_secret'] = '34567'
        s.save()
        with patch('twython.Twython.get_authorized_tokens') as get_authorized_tokens:
            get_authorized_tokens.return_value = dict(screen_name='billy_bob',
                                                      user_id=123,
                                                      oauth_token='defgh',
                                                      oauth_token_secret='45678')
            response = self.client.get(claim_url, {'oauth_verifier': 'uvwxy'}, follow=True)
            self.assertEqual(response.status_code, 200)
            # same channel object, but its config carries the new credentials
            channel = response.context['object']
            self.assertEqual(channel.address, 'billy_bob')
            config = json.loads(channel.config)
            self.assertEqual(config['handle_id'], 123)
            self.assertEqual(config['oauth_token'], 'defgh')
            self.assertEqual(config['oauth_token_secret'], '45678')
def send_message(self, numbers, message, org=None, user=None):
    """Broadcast *message* to the given phone *numbers* via a fresh contact group.

    Defaults *org*/*user* to the test fixtures when not supplied. Returns the single
    created Msg when one number was given, otherwise the list of Msgs ordered by
    text then pk.
    """
    org = org or self.org
    user = user or self.user
    group = ContactGroup.get_or_create(org, user, ",".join(numbers))
    members = [Contact.get_or_create(org, user, name=None, urns=[(TEL_SCHEME, number)])
               for number in numbers]
    group.contacts.add(*members)
    broadcast = Broadcast.create(org, user, message, [group])
    broadcast.send()
    msgs = Msg.objects.filter(broadcast=broadcast).order_by('text', 'pk')
    return msgs.first() if len(numbers) == 1 else list(msgs)
def test_unclaimed(self):
    """Sync behavior for a released channel: 'reg' commands until it is attached to an
    org, then a 'claim' command, and a client-initiated reset releases it again."""
    response = self.sync(self.released_channel)
    self.assertEquals(200, response.status_code)
    response = json.loads(response.content)
    # should be a registration command containing a new claim code
    self.assertEquals(response['cmds'][0]['cmd'], 'reg')
    post_data = dict(cmds=[dict(cmd="status",
                                org_id=self.released_channel.pk,
                                p_lvl=84,
                                net="WIFI",
                                p_sts="CHA",
                                p_src="USB",
                                pending=[],
                                retry=[])])
    # try syncing against the released channel that has a secret
    self.released_channel.secret = "999"
    self.released_channel.save()
    response = self.sync(self.released_channel, post_data=post_data)
    response = json.loads(response.content)
    # still just a registration command
    self.assertEquals(response['cmds'][0]['cmd'], 'reg')
    # claim the channel on the site
    self.released_channel.org = self.org
    self.released_channel.save()
    post_data = dict(cmds=[dict(cmd="status",
                                org_id="-1",
                                p_lvl=84,
                                net="WIFI",
                                p_sts="STATUS_CHARGING",
                                p_src="USB",
                                pending=[],
                                retry=[])])
    response = self.sync(self.released_channel, post_data=post_data)
    response = json.loads(response.content)
    # should now be a claim command in return
    self.assertEquals(response['cmds'][0]['cmd'], 'claim')
    # now try releasing the channel from the client
    post_data = dict(cmds=[dict(cmd="reset", p_id=1)])
    response = self.sync(self.released_channel, post_data=post_data)
    response = json.loads(response.content)
    # channel should be released now: no org and no longer active
    channel = Channel.objects.get(pk=self.released_channel.pk)
    self.assertFalse(channel.org)
    self.assertFalse(channel.is_active)
def test_quota_exceeded(self):
    """When an org is low on credits, a sync hands the client only as many queued
    messages as the quota allows, not everything that is pending.

    (Cleanup: the first send_message result was bound to an unused local; the call
    is kept for its side effect only.)
    """
    # set our org to be on the trial plan with exactly 10 credits
    self.org.plan = FREE_PLAN
    self.org.save()
    self.org.topups.all().update(credits=10)
    self.assertEquals(10, self.org.get_credits_remaining())
    self.assertEquals(0, self.org.get_credits_used())
    # queue one message; syncing should hand it over and consume one credit
    self.send_message(['250788382382'], "How is it going?")
    response = self.sync(self.tel_channel)
    self.assertEquals(200, response.status_code)
    response = json.loads(response.content)
    self.assertEqual(1, len(response['cmds']))
    self.assertEquals(9, self.org.get_credits_remaining())
    self.assertEquals(1, self.org.get_credits_used())
    # let's create 10 other messages, this will put our last message above our quota
    for i in range(10):
        self.send_message(['250788382%03d' % i], "This is message # %d" % i)
    # should get the 10 messages we are allotted back, not the 11 that exist
    response = self.sync(self.tel_channel)
    self.assertEquals(200, response.status_code)
    response = json.loads(response.content)
    self.assertEqual(10, len(response['cmds']))
def test_sync(self):
    """Full sync round-trip: queued outgoing messages are handed to the client,
    client-reported statuses, calls and incoming messages are applied, channel
    gcm/uuid are updated, and power ('P') alerts are opened and resolved as the
    reported battery state changes."""
    date = timezone.now()
    date = int(time.mktime(date.timetuple())) * 1000  # client timestamps are epoch millis
    # create a payload from the client
    bcast = self.send_message(['250788382382', '250788383383'], "How is it going?")
    msg1 = bcast[0]
    msg2 = bcast[1]
    msg3 = self.send_message(['250788382382'], "What is your name?")
    msg4 = self.send_message(['250788382382'], "Do you have any children?")
    msg5 = self.send_message(['250788382382'], "What's my dog's name?")
    self.org.administrators.add(self.user)
    self.user.set_org(self.org)
    # Check our sync point has all three messages queued for delivery
    response = self.sync(self.tel_channel)
    self.assertEquals(200, response.status_code)
    response = json.loads(response.content)
    cmds = response['cmds']
    self.assertEqual(4, len(cmds))
    # assert that our first command is the two message broadcast
    cmd = cmds[0]
    self.assertEquals("How is it going?", cmd['msg'])
    self.assertTrue('+250788382382' in [m['phone'] for m in cmd['to']])
    self.assertTrue('+250788383383' in [m['phone'] for m in cmd['to']])
    self.assertTrue(msg1.pk in [m['id'] for m in cmd['to']])
    self.assertTrue(msg2.pk in [m['id'] for m in cmd['to']])
    # add another message we'll pretend is in retry to see that we exclude them from sync
    msg6 = self.send_message(['250788382382'], "Pretend this message is in retry on the client, don't send it on sync")
    post_data = dict(cmds=[
        # device gcm data
        dict(cmd='gcm', gcm_id='12345', uuid='abcde'),
        # device details status
        dict(cmd="status", p_sts="DIS", p_src="BAT", p_lvl="60",
             net="UMTS", org_id=8, retry=[msg6.pk], pending=[]),
        # results for the outgoing messages
        dict(cmd="mt_sent", msg_id=msg1.pk, ts=date),
        dict(cmd="mt_sent", msg_id=msg2.pk, ts=date),
        dict(cmd="mt_dlvd", msg_id=msg3.pk, ts=date),
        dict(cmd="mt_error", msg_id=msg4.pk, ts=date),
        dict(cmd="mt_fail", msg_id=msg5.pk, ts=date),
        # a missed call
        dict(cmd="call", phone="2505551212", type='miss', ts=date),
        # incoming
        dict(cmd="call", phone="2505551212", type='mt', dur=10, ts=date),
        # outgoing
        dict(cmd="call", phone="+250788383383", type='mo', dur=5, ts=date),
        # a new incoming message
        dict(cmd="mo_sms", phone="+250788383383", msg="This is giving me trouble", p_id="1", ts=date)])
    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)
    # new batch, our ack and our claim command for new org
    self.assertEquals(2, len(json.loads(response.content)['cmds']))
    # check that our messages were updated accordingly (S=sent, D=delivered, E=error, F=failed)
    self.assertEqual(2, Msg.objects.filter(channel=self.tel_channel, status='S', direction='O').count())
    self.assertEqual(1, Msg.objects.filter(channel=self.tel_channel, status='D', direction='O').count())
    self.assertEqual(1, Msg.objects.filter(channel=self.tel_channel, status='E', direction='O').count())
    self.assertEqual(1, Msg.objects.filter(channel=self.tel_channel, status='F', direction='O').count())
    # we should now have a new incoming message
    self.assertEqual(1, Msg.objects.filter(direction='I').count())
    # We should now have one sync
    self.assertEquals(1, SyncEvent.objects.filter(channel=self.tel_channel).count())
    # check our channel gcm and uuid were updated
    self.tel_channel = Channel.objects.get(pk=self.tel_channel.pk)
    self.assertEquals('12345', self.tel_channel.gcm_id)
    self.assertEquals('abcde', self.tel_channel.uuid)
    # set an email on our channel
    self.tel_channel.alert_email = 'fred@worldrelif.org'
    self.tel_channel.save()
    # We should not have an alert this time
    self.assertEquals(0, Alert.objects.all().count())
    # the case the status must be reported: discharging at 20%
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="DIS", p_src="BAT", p_lvl="20", net="UMTS", retry=[], pending=[])])
    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)
    # we should now have an Alert
    self.assertEquals(1, Alert.objects.all().count())
    # and at this time it must be not ended
    self.assertEquals(1, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())
    # the case the status must be reported but a notification was already sent
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="DIS", p_src="BAT", p_lvl="15", net="UMTS", pending=[], retry=[])])
    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)
    # we should not create a new alert
    self.assertEquals(1, Alert.objects.all().count())
    # still not ended
    self.assertEquals(1, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())
    # Let's plug the channel to the charger
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="CHA", p_src="BAT", p_lvl="15", net="UMTS", pending=[], retry=[])])
    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)
    # only one alert
    self.assertEquals(1, Alert.objects.all().count())
    # and we end all alerts related to this issue
    self.assertEquals(0, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())
    # clear the alerts
    Alert.objects.all().delete()
    # the case the status is in unknown state
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="UNK", p_src="BAT", p_lvl="15", net="UMTS", pending=[], retry=[])])
    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)
    # we should now create a new alert
    self.assertEquals(1, Alert.objects.all().count())
    # one alert not ended
    self.assertEquals(1, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())
    # Let's plug the channel to the charger to end this unknown power status
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="CHA", p_src="BAT", p_lvl="15", net="UMTS", pending=[], retry=[])])
    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)
    # still only one alert
    self.assertEquals(1, Alert.objects.all().count())
    # and we end all alerts related to this issue
    self.assertEquals(0, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())
    # clear all the alerts
    Alert.objects.all().delete()
    # the case the status is in the not-charging state
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="NOT", p_src="BAT", p_lvl="15", net="UMTS", pending=[], retry=[])])
    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)
    # we should now create a new alert
    self.assertEquals(1, Alert.objects.all().count())
    # one alert not ended
    self.assertEquals(1, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())
    # Let's plug the channel to the charger to end this not-charging power status
    post_data = dict(cmds=[
        # device details status
        dict(cmd="status", p_sts="CHA", p_src="BAT", p_lvl="15", net="UMTS", pending=[], retry=[])])
    # now send the channel's updates
    response = self.sync(self.tel_channel, post_data)
    # first we have a new alert created
    self.assertEquals(1, Alert.objects.all().count())
    # and we end all alerts related to this issue
    self.assertEquals(0, Alert.objects.filter(sync_event__channel=self.tel_channel, ended_on=None, alert_type='P').count())
def test_signing(self):
    """A correctly signed sync succeeds; a bad signature is rejected with 401."""
    good_response = self.sync(self.tel_channel)
    self.assertEquals(200, good_response.status_code)
    bad_response = self.sync(self.tel_channel, signature="badsig")
    self.assertEquals(401, bad_response.status_code)
def test_inbox_duplication(self):
    """If the connection gets interrupted but some messages succeed, subsequent syncs
    must not duplicate inbox messages: two client messages with the same phone, text
    and timestamp are stored as a single Msg."""
    date = timezone.now()
    date = int(time.mktime(date.timetuple())) * 1000  # client timestamps are epoch millis
    post_data = dict(cmds=[
        dict(cmd="mo_sms", phone="2505551212", msg="First message", p_id="1", ts=date),
        dict(cmd="mo_sms", phone="2505551212", msg="First message", p_id="2", ts=date),
        dict(cmd="mo_sms", phone="2505551212", msg="A second message", p_id="3", ts=date)])
    response = self.sync(self.tel_channel, post_data)
    self.assertEquals(200, response.status_code)
    responses = json.loads(response.content)
    cmds = responses['cmds']
    # check the server gave us responses for our messages
    r0 = self.get_response(cmds, '1')
    r1 = self.get_response(cmds, '2')
    r2 = self.get_response(cmds, '3')
    self.assertIsNotNone(r0)
    self.assertIsNotNone(r1)
    self.assertIsNotNone(r2)
    # first two should have the same server id, since they deduped to one message
    self.assertEquals(r0['extra'], r1['extra'])
    # One was a duplicate, should only have 2
    self.assertEqual(2, Msg.objects.filter(direction='I').count())
def get_response(self, responses, p_id):
    """Return the first response dict whose 'p_id' equals *p_id*, or None if no match."""
    matching = (r for r in responses if 'p_id' in r and r['p_id'] == p_id)
    return next(matching, None)
class ChannelBatchTest(TembaTest):

    def test_time_utils(self):
        """datetime -> epoch-millis -> datetime should round-trip exactly once the
        value is truncated to whole milliseconds."""
        from temba.utils import datetime_to_ms, ms_to_datetime
        now = timezone.now()
        # Truncate to millisecond precision since epoch millis can't carry sub-ms
        # detail. Use floor division so the result stays an int: under Python 3,
        # plain '/' would yield a float and replace(microsecond=...) would fail.
        now = now.replace(microsecond=now.microsecond // 1000 * 1000)
        epoch = datetime_to_ms(now)
        self.assertEquals(ms_to_datetime(epoch), now)
class SyncEventTest(SmartminTest):
    """Unit tests for the SyncEvent model itself (no HTTP sync endpoint involved)."""

    def setUp(self):
        # minimal fixtures: a superuser, a regular user, an org, and a channel to
        # attach sync events to
        self.superuser = User.objects.create_superuser(username="super", email="super@user.com", password="super")
        self.user = self.create_user("tito")
        self.org = Org.objects.create(name="Temba", timezone="Africa/Kigali", created_by=self.user, modified_by=self.user)
        self.tel_channel = Channel.objects.create(name="Test Channel", address="0785551212", org=self.org,
                                                  created_by=self.user, modified_by=self.user,
                                                  secret="12345", gcm_id="123")

    def test_sync_event_model(self):
        """SyncEvent.create should persist the pending/retry message lists and update
        the channel's country from the status command's 'cc' field."""
        self.sync_event = SyncEvent.create(self.tel_channel, dict(p_src="AC", p_sts="DIS", p_lvl=80, net="WIFI", pending=[1, 2], retry=[3, 4], cc='RW'), [1, 2])
        self.assertEquals(SyncEvent.objects.all().count(), 1)
        self.assertEquals(self.sync_event.get_pending_messages(), [1, 2])
        self.assertEquals(self.sync_event.get_retry_messages(), [3, 4])
        self.assertEquals(self.sync_event.incoming_command_count, 0)
        # NOTE(review): incoming_command_count stays 0 for both [1, 2] and [1]
        # incoming lists — presumably only certain command types are counted; confirm
        self.sync_event = SyncEvent.create(self.tel_channel, dict(p_src="AC", p_sts="DIS", p_lvl=80, net="WIFI", pending=[1, 2], retry=[3, 4], cc='US'), [1])
        self.assertEquals(self.sync_event.incoming_command_count, 0)
        # re-fetch the channel to observe the country update
        self.tel_channel = Channel.objects.get(pk=self.tel_channel.pk)
        self.assertEquals('US', self.tel_channel.country)
class ChannelAlertTest(TembaTest):

    def test_no_alert_email(self):
        """No alert emails should be sent when alert_email or the org is missing."""
        # set our last seen to a while ago
        self.channel.last_seen = timezone.now() - timedelta(minutes=40)
        self.channel.save()

        check_channels_task()
        self.assertTrue(len(mail.outbox) == 0)

        # add alert email, remove org and set last seen to now to force a resolve email to try to send
        self.channel.alert_email = 'fred@unicef.org'
        self.channel.org = None
        self.channel.last_seen = timezone.now()
        self.channel.save()
        check_channels_task()

        # still nothing sent -- a channel without an org can't alert
        self.assertTrue(len(mail.outbox) == 0)

    def test_external(self):
        """Claim an EXTERNAL channel; verify config, handler URLs and send-URL substitution."""
        from temba.channels.models import EXTERNAL
        Channel.objects.all().delete()

        self.login(self.admin)

        # should see the general channel claim page
        response = self.client.get(reverse('channels.channel_claim'))
        self.assertContains(response, reverse('channels.channel_claim_external'))

        # try to claim a channel
        response = self.client.get(reverse('channels.channel_claim_external'))
        post_data = response.context['form'].initial

        # send URL with {{...}} placeholders which build_send_url substitutes
        url = 'http://test.com/send.php?from={{from}}&text={{text}}&to={{to}}'

        post_data['number'] = '12345'
        post_data['country'] = 'RW'
        post_data['url'] = url
        post_data['method'] = 'GET'

        response = self.client.post(reverse('channels.channel_claim_external'), post_data)
        channel = Channel.objects.get()

        self.assertEquals('RW', channel.country)
        self.assertTrue(channel.uuid)
        self.assertEquals(post_data['number'], channel.address)
        self.assertEquals(post_data['url'], channel.config_json()['send_url'])
        self.assertEquals(post_data['method'], channel.config_json()['method'])
        self.assertEquals(EXTERNAL, channel.channel_type)

        # claiming should redirect to the configuration page
        config_url = reverse('channels.channel_configuration', args=[channel.pk])
        self.assertRedirect(response, config_url)

        # configuration page should list all four status-callback handler URLs
        response = self.client.get(config_url)
        self.assertEquals(200, response.status_code)
        self.assertContains(response, reverse('api.external_handler', args=['sent', channel.uuid]))
        self.assertContains(response, reverse('api.external_handler', args=['delivered', channel.uuid]))
        self.assertContains(response, reverse('api.external_handler', args=['failed', channel.uuid]))
        self.assertContains(response, reverse('api.external_handler', args=['received', channel.uuid]))

        # test substitution in our url
        self.assertEquals('http://test.com/send.php?from=5080&text=test&to=%2B250788383383',
                          channel.build_send_url(url, { 'from':"5080", 'text':"test", 'to':"+250788383383" }))

        # test substitution with unicode
        self.assertEquals('http://test.com/send.php?from=5080&text=Reply+%E2%80%9C1%E2%80%9D+for+good&to=%2B250788383383',
                          channel.build_send_url(url, { 'from':"5080", 'text':u"Reply “1” for good", 'to':"+250788383383" }))

    def test_shaqodoon(self):
        """Claim a Shaqodoon channel and verify its stored configuration."""
        from temba.channels.models import SHAQODOON
        Channel.objects.all().delete()

        self.login(self.admin)

        # try to claim a channel
        response = self.client.get(reverse('channels.channel_claim_shaqodoon'))
        post_data = response.context['form'].initial

        url = 'http://test.com/send.php'

        post_data['username'] = 'uname'
        post_data['password'] = 'pword'
        post_data['url'] = 'http://test.com/send.php'
        post_data['key'] = 'secret_key'
        post_data['number'] = '301'

        response = self.client.post(reverse('channels.channel_claim_shaqodoon'), post_data)
        channel = Channel.objects.get()

        # Shaqodoon channels are always Somali ('SO')
        self.assertEquals('SO', channel.country)
        self.assertTrue(channel.uuid)
        self.assertEquals(post_data['number'], channel.address)
        self.assertEquals(post_data['url'], channel.config_json()['send_url'])
        self.assertEquals(post_data['username'], channel.config_json()['username'])
        self.assertEquals(post_data['password'], channel.config_json()['password'])
        self.assertEquals(post_data['key'], channel.config_json()['key'])
        self.assertEquals(SHAQODOON, channel.channel_type)

        config_url = reverse('channels.channel_configuration', args=[channel.pk])
        self.assertRedirect(response, config_url)

        response = self.client.get(config_url)
        self.assertEquals(200, response.status_code)
        self.assertContains(response, reverse('api.shaqodoon_handler', args=['received', channel.uuid]))

    def test_kannel(self):
        """Claim a Kannel channel; credentials should be auto-generated."""
        from temba.channels.models import KANNEL
        Channel.objects.all().delete()

        self.login(self.admin)

        # should see the general channel claim page
        response = self.client.get(reverse('channels.channel_claim'))
        self.assertContains(response, reverse('channels.channel_claim_kannel'))

        # try to claim a channel
        response = self.client.get(reverse('channels.channel_claim_kannel'))
        post_data = response.context['form'].initial

        post_data['number'] = '3071'
        post_data['country'] = 'RW'
        post_data['url'] = 'http://kannel.temba.com/cgi-bin/sendsms'

        response = self.client.post(reverse('channels.channel_claim_kannel'), post_data)
        channel = Channel.objects.get()

        self.assertEquals('RW', channel.country)
        self.assertTrue(channel.uuid)
        self.assertEquals(post_data['number'], channel.address)
        self.assertEquals(post_data['url'], channel.config_json()['send_url'])

        # make sure we generated a username and password
        self.assertTrue(channel.config_json()['username'])
        self.assertTrue(channel.config_json()['password'])
        self.assertEquals(KANNEL, channel.channel_type)

        config_url = reverse('channels.channel_configuration', args=[channel.pk])
        self.assertRedirect(response, config_url)

        response = self.client.get(config_url)
        self.assertEquals(200, response.status_code)

        # our configuration page should list our receive URL
        self.assertContains(response, reverse('api.kannel_handler', args=['receive', channel.uuid]))

    def test_zenvia(self):
        """Zenvia claiming is only offered to Brazilian-timezone orgs."""
        Channel.objects.all().delete()

        self.login(self.admin)

        # shouldn't be able to see the claim zenvia page if we aren't part of that group
        response = self.client.get(reverse('channels.channel_claim'))
        self.assertNotContains(response, "Zenvia")

        # but if we are in the proper time zone
        self.org.timezone = 'America/Sao_Paulo'
        self.org.save()

        response = self.client.get(reverse('channels.channel_claim'))
        self.assertContains(response, "Zenvia")

        # try to claim a channel
        response = self.client.get(reverse('channels.channel_claim_zenvia'))
        post_data = response.context['form'].initial

        post_data['account'] = 'rapidpro.gw'
        post_data['code'] = 'h7GpAIEp85'
        post_data['shortcode'] = '28595'

        response = self.client.post(reverse('channels.channel_claim_zenvia'), post_data)
        channel = Channel.objects.get()

        # Zenvia channels are always Brazilian ('BR')
        self.assertEquals('BR', channel.country)
        self.assertEquals(post_data['account'], channel.config_json()['account'])
        self.assertEquals(post_data['code'], channel.config_json()['code'])
        self.assertEquals(post_data['shortcode'], channel.address)
        self.assertEquals('ZV', channel.channel_type)

        config_url = reverse('channels.channel_configuration', args=[channel.pk])
        self.assertRedirect(response, config_url)

        response = self.client.get(config_url)
        self.assertEquals(200, response.status_code)
        self.assertContains(response, reverse('api.zenvia_handler', args=['status', channel.uuid]))
        self.assertContains(response, reverse('api.zenvia_handler', args=['receive', channel.uuid]))

    def test_claim_africa(self):
        """Claim an Africa's Talking channel and verify config and handler URLs."""
        Channel.objects.all().delete()
        self.login(self.admin)

        # visit the africa's talking page
        response = self.client.get(reverse('channels.channel_claim_africas_talking'))
        self.assertEquals(200, response.status_code)
        post_data = response.context['form'].initial

        post_data['shortcode'] = '5259'
        post_data['username'] = 'temba'
        post_data['api_key'] = 'asdf-asdf-asdf-asdf-asdf'

        response = self.client.post(reverse('channels.channel_claim_africas_talking'), post_data)

        channel = Channel.objects.get()

        self.assertEquals('temba', channel.config_json()['username'])
        self.assertEquals('asdf-asdf-asdf-asdf-asdf', channel.config_json()['api_key'])
        self.assertEquals('5259', channel.address)
        # defaults to Kenya
        self.assertEquals('KE', channel.country)
        self.assertEquals('AT', channel.channel_type)

        config_url = reverse('channels.channel_configuration', args=[channel.pk])
        self.assertRedirect(response, config_url)

        response = self.client.get(config_url)
        self.assertEquals(200, response.status_code)
        self.assertContains(response, reverse('api.africas_talking_handler', args=['callback', channel.uuid]))
        self.assertContains(response, reverse('api.africas_talking_handler', args=['delivery', channel.uuid]))

    @override_settings(SEND_EMAILS=True)
    def test_disconnected_alert(self):
        """A channel unseen for too long triggers one disconnect alert, resolved on reconnect."""
        # set our last seen to a while ago
        self.channel.alert_email = 'fred@unicef.org'
        self.channel.last_seen = timezone.now() - timedelta(minutes=40)
        self.channel.save()

        check_channels_task()

        # should have created one alert
        alert = Alert.objects.get()
        self.assertEquals(self.channel, alert.channel)
        self.assertEquals(ALERT_DISCONNECTED, alert.alert_type)
        self.assertFalse(alert.ended_on)
        self.assertTrue(len(mail.outbox) == 1)

        # email body should match the rendered disconnected template
        template = 'channels/email/disconnected_alert.txt'
        host = getattr(settings, 'HOSTNAME', 'rapidpro.io')
        context = dict(org=self.channel.org, channel=self.channel, now=timezone.now(),
                       branding=dict(name="RapidPro", host=host),
                       last_seen=self.channel.last_seen, sync=alert.sync_event)

        text_template = loader.get_template(template)
        text = text_template.render(Context(context))

        self.assertEquals(mail.outbox[0].body, text)

        # call it again
        check_channels_task()

        # still only one alert
        self.assertEquals(1, Alert.objects.all().count())
        self.assertTrue(len(mail.outbox) == 1)

        # ok, let's have the channel show up again
        self.channel.last_seen = timezone.now() + timedelta(minutes=5)
        self.channel.save()

        check_channels_task()

        # still only one alert, but it is now ended
        alert = Alert.objects.get()
        self.assertTrue(alert.ended_on)
        self.assertTrue(len(mail.outbox) == 2)

        # the second email should match the rendered reconnected template
        template = 'channels/email/connected_alert.txt'
        host = getattr(settings, 'HOSTNAME', 'rapidpro.io')
        context = dict(org=self.channel.org, channel=self.channel, now=timezone.now(),
                       branding=dict(name="RapidPro", host=host),
                       last_seen=self.channel.last_seen, sync=alert.sync_event)

        text_template = loader.get_template(template)
        text = text_template.render(Context(context))

        self.assertEquals(mail.outbox[1].body, text)

    def test_infobip(self):
        """Claim an Infobip channel; the address should be normalized to E.164."""
        Channel.objects.all().delete()

        self.login(self.admin)

        # should always see infobip as an option
        response = self.client.get(reverse('channels.channel_claim'))
        self.assertContains(response, "Infobip")

        # try to claim a channel
        response = self.client.get(reverse('channels.channel_claim_infobip'))
        post_data = response.context['form'].initial

        post_data['country'] = 'NI'
        post_data['number'] = '250788123123'
        post_data['username'] = 'user1'
        post_data['password'] = 'pass1'

        response = self.client.post(reverse('channels.channel_claim_infobip'), post_data)
        channel = Channel.objects.get()

        self.assertEquals('NI', channel.country)
        self.assertEquals(post_data['username'], channel.config_json()['username'])
        self.assertEquals(post_data['password'], channel.config_json()['password'])
        # number gets a leading '+'
        self.assertEquals('+250788123123', channel.address)
        self.assertEquals('IB', channel.channel_type)

        config_url = reverse('channels.channel_configuration', args=[channel.pk])
        self.assertRedirect(response, config_url)

        response = self.client.get(config_url)
        self.assertEquals(200, response.status_code)
        self.assertContains(response, reverse('api.infobip_handler', args=['received', channel.uuid]))
        self.assertContains(response, reverse('api.infobip_handler', args=['delivered', channel.uuid]))

    @override_settings(SEND_EMAILS=True)
    def test_sms_alert(self):
        """Stuck queued messages raise an SMS alert, rate-limited and resolved on delivery."""
        contact = self.create_contact("John Doe", '123')

        # create a message from two hours ago
        one_hour_ago = timezone.now() - timedelta(hours=1)
        two_hours_ago = timezone.now() - timedelta(hours=2)
        three_hours_ago = timezone.now() - timedelta(hours=3)
        four_hours_ago = timezone.now() - timedelta(hours=4)
        five_hours_ago = timezone.now() - timedelta(hours=5)
        six_hours_ago = timezone.now() - timedelta(hours=6)

        msg1 = self.create_msg(text="Message One", contact=contact, created_on=five_hours_ago, status='Q')

        # make sure our channel has been seen recently
        self.channel.last_seen = timezone.now()
        self.channel.alert_email = 'fred@unicef.org'
        self.channel.org = self.org
        self.channel.save()

        # ok check on our channel
        check_channels_task()

        # we don't have successfully sent message and we have an alert and only one
        self.assertEquals(Alert.objects.all().count(), 1)
        alert = Alert.objects.get()
        self.assertEquals(self.channel, alert.channel)
        self.assertEquals(ALERT_SMS, alert.alert_type)
        self.assertFalse(alert.ended_on)
        self.assertTrue(len(mail.outbox) == 1)

        # let's end the alert
        alert = Alert.objects.all()[0]
        alert.ended_on = six_hours_ago
        alert.save()

        dany = self.create_contact("Dany Craig", "765")

        # let have a recent sent message
        sent_msg = self.create_msg(text="SENT Message", contact=dany, created_on=four_hours_ago, sent_on=one_hour_ago, status='D')

        # ok check on our channel
        check_channels_task()

        # if latest_sent_message is after our queued message no alert is created
        self.assertEquals(Alert.objects.all().count(), 1)

        # consider the sent message was sent before our queued msg
        sent_msg.sent_on = three_hours_ago
        sent_msg.save()

        msg1.created_on = two_hours_ago
        msg1.save()

        # check our channel again
        check_channels_task()

        # no new alert created because we sent one in the past hour
        self.assertEquals(Alert.objects.all().count(), 1)

        # age the previous alert so the rate-limit no longer applies
        alert = Alert.objects.all()[0]
        alert.created_on = six_hours_ago
        alert.save()

        # check our channel again
        check_channels_task()

        # this time we have a new alert and should create only one
        self.assertEquals(Alert.objects.all().count(), 2)

        # get the alert which is not ended
        alert = Alert.objects.get(ended_on=None)
        self.assertEquals(self.channel, alert.channel)
        self.assertEquals(ALERT_SMS, alert.alert_type)
        self.assertFalse(alert.ended_on)
        self.assertTrue(len(mail.outbox) == 2)

        # run again, nothing should change
        check_channels_task()

        alert = Alert.objects.get(ended_on=None)
        self.assertFalse(alert.ended_on)
        self.assertTrue(len(mail.outbox) == 2)

        # fix our message
        msg1.status = 'D'
        msg1.save()

        # run again, our alert should end
        check_channels_task()

        # still only one alert though, and no new email sent, alert must not be ended before one hour
        alert = Alert.objects.all().latest('ended_on')
        self.assertTrue(alert.ended_on)
        self.assertTrue(len(mail.outbox) == 2)
|
Stanford-Legal-Tech-Design/legaltech-rapidpro
|
temba/channels/tests.py
|
Python
|
agpl-3.0
| 78,039
|
[
"VisIt"
] |
76893f7ec1e6f8cd433745e715660ec02324d45d683352cce25903c5f9b25558
|
import unittest
from f90wrap import fortran, parser, transform
from . import test_samples_dir
class TestTransform(unittest.TestCase):
    """Tests for the f90wrap parse-tree transformation passes."""

    def setUp(self):
        # parse the sample Fortran source once for every test
        self.root = parser.read_files([str(test_samples_dir/'circle.f90')])

    def test_resolve_interface_prototypes(self):
        ''' Verify procedures gets moved into interface objects '''
        new = transform.ResolveInterfacePrototypes().visit(self.root)
        m = new.modules[0]
        self.assertEqual(len(m.procedures), 6)
        # prototypes in the interface should have been replaced by the real procedure
        self.assertTrue(isinstance(
            m.interfaces[0].procedures[0],
            fortran.Function
        ))

    def test_parse_dnad(self):
        # DNAD.fpp defines many interfaces sharing implementations
        root = parser.read_files([str(test_samples_dir/'DNAD.fpp')])
        new = transform.ResolveInterfacePrototypes().visit(root)
        m = new.modules[0]
        self.assertEqual(len(m.procedures), 1)
        # TODO: Fix incomplete resolution of prototypes
        # This is because both interfaces reference the same procedure
        # but we only resolve first reference of a given procedure.
        self.assertIsInstance(m.interfaces[12].procedures[0], fortran.Function)
        self.assertIsInstance(m.interfaces[13].procedures[0], fortran.Prototype)

    def test_resolve_binding_prototypes(self):
        ''' Verify procedures gets moved into binding objects '''
        new = transform.ResolveBindingPrototypes().visit(self.root)
        m = new.modules[0]
        t = m.types[0]
        b_normal = t.bindings[0]
        b_generic = t.bindings[2]
        b_final = t.bindings[3]
        # two procedures stay at module level; the rest move into bindings
        self.assertEqual(len(m.procedures), 2)
        self.assertEqual(len(b_normal.procedures), 1)
        self.assertEqual(len(b_generic.procedures), 2)
        self.assertIn('destructor', b_final.attributes)
        self.assertTrue(isinstance(
            b_normal.procedures[0],
            fortran.Function
        ))

    def test_bind_constructor_interfaces(self):
        ''' Verify interfaces with same name as type become constructors '''
        new = transform.ResolveInterfacePrototypes().visit(self.root)
        new = transform.BindConstructorInterfaces().visit(new)
        m = new.modules[0]
        t = m.types[0]
        # the interface moves from the module onto the type, flagged as constructor
        self.assertEqual(len(m.interfaces), 0)
        self.assertEqual(len(t.interfaces), 1)
        self.assertIn('constructor', t.interfaces[0].attributes)

    def test_generic_tranform(self):
        # run the full generic-wrapper transformation with default options
        types = fortran.find_types(self.root)
        mods = { type.mod_name: type.mod_name for _,type in types.items()}
        new = transform.transform_to_generic_wrapper(self.root,
                                                     types=types,
                                                     callbacks=[],
                                                     constructors=[],
                                                     destructors=[],
                                                     short_names={},
                                                     init_lines={},
                                                     kept_subs=[],
                                                     kept_mods=[],
                                                     argument_name_map={},
                                                     move_methods=True,
                                                     shorten_routine_names=[],
                                                     modules_for_type=mods,
                                                     remove_optional_arguments=False,
                                                     force_public=[],
                                                     )
        m = new.modules[0]
        t = m.types[0]
        # methods are moved onto the type, leaving the module empty of procedures
        self.assertEqual(len(m.procedures), 0)
        self.assertEqual(len(t.elements), 0)
        self.assertEqual(len(t.bindings), 4)
        self.assertEqual(len(t.interfaces), 1)
|
jameskermode/f90wrap
|
test/test_transform.py
|
Python
|
lgpl-3.0
| 3,241
|
[
"VisIt"
] |
d1e717f04f97a091d1bef003c582d8197162f4b75d9377e4d2d6d210fe419bbc
|
# 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis- Hastings
# Added energy minimization
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r, alpha, beta):
    """Gaussian-Jastrow trial wave function for two electrons in a 2D dot.

    r is a (2, 2) array of particle positions; alpha and beta are the
    variational parameters.  Returns exp(-0.5*alpha*(r1^2+r2^2) + J(r12))
    with J(r12) = r12/(1+beta*r12).
    """
    sq1 = r[0, 0] ** 2 + r[0, 1] ** 2
    sq2 = r[1, 0] ** 2 + r[1, 1] ** 2
    dx = r[0, 0] - r[1, 0]
    dy = r[0, 1] - r[1, 1]
    sep = sqrt(dx * dx + dy * dy)
    jastrow = sep / (1 + beta * sep)
    return exp(-0.5 * alpha * (sq1 + sq2) + jastrow)
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r, alpha, beta):
    """Analytic local energy E_L = (H psi)/psi for the 2D interacting dot.

    Sum of the harmonic-oscillator part, the Coulomb repulsion 1/r12 and
    the Jastrow correlation correction.
    """
    sq1 = r[0, 0] ** 2 + r[0, 1] ** 2
    sq2 = r[1, 0] ** 2 + r[1, 1] ** 2
    dx = r[0, 0] - r[1, 0]
    dy = r[0, 1] - r[1, 1]
    sep = sqrt(dx * dx + dy * dy)
    d = 1.0 / (1 + beta * sep)
    d2 = d * d
    # non-interacting oscillator contribution
    oscillator = 0.5 * (1 - alpha * alpha) * (sq1 + sq2) + 2.0 * alpha
    # electron-electron repulsion plus the Jastrow correction terms
    correlation = 1.0 / sep + d2 * (alpha * sep - d2 + 2 * beta * d - 1.0 / sep)
    return oscillator + correlation
# Derivate of wave function ansatz as function of variational parameters
def DerivativeWFansatz(r, alpha, beta):
    """Derivatives of ln(psi_T) with respect to the variational parameters.

    Returns a length-2 double array: [d ln psi/d alpha, d ln psi/d beta].
    """
    sq1 = r[0, 0] ** 2 + r[0, 1] ** 2
    sq2 = r[1, 0] ** 2 + r[1, 1] ** 2
    dx = r[0, 0] - r[1, 0]
    dy = r[0, 1] - r[1, 1]
    sep = sqrt(dx * dx + dy * dy)
    d = 1.0 / (1 + beta * sep)
    # d/dalpha of -0.5*alpha*(r1^2+r2^2) and d/dbeta of r12/(1+beta*r12)
    return np.array([-0.5 * (sq1 + sq2), -sep * sep * d * d], np.double)
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r, alpha, beta):
    """Quantum (drift) force F = 2*grad(psi)/psi for the trial wave function.

    For psi = exp(-0.5*alpha*(r1^2+r2^2) + r12/(1+beta*r12)) the gradient with
    respect to particle i gives
        F_i = -2*alpha*r_i + 2*deno^2*(r_i - r_j)/r12,  deno = 1/(1+beta*r12).
    The original code multiplied the harmonic and Jastrow terms together
    instead of adding them, which is not the gradient of WaveFunction above.
    """
    # sized from the input so the function does not rely on module globals
    qforce = np.zeros(r.shape, np.double)
    r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
    deno = 1.0/(1+beta*r12)
    qforce[0,:] = -2*alpha*r[0,:] + 2*(r[0,:]-r[1,:])*deno*deno/r12
    qforce[1,:] = -2*alpha*r[1,:] + 2*(r[1,:]-r[0,:])*deno*deno/r12
    return qforce
# Computing the derivative of the energy and the energy
def EnergyMinimization(alpha, beta):
    """One importance-sampled VMC run; returns (mean energy, dE/d[alpha,beta]).

    Uses Metropolis-Hastings moves driven by the quantum force (Langevin /
    Fokker-Planck dynamics).  Reads the module-level globals NumberParticles
    and Dimension.  NOTE(review): indentation reconstructed from a stripped
    source -- the per-cycle sampling of DeltaE/DerPsi after the particle loop
    matches the standard version of this program; confirm against the original.
    """
    NumberMCcycles= 10000
    # Parameters in the Fokker-Planck simulation of the quantum force
    D = 0.5
    TimeStep = 0.05
    # positions
    PositionOld = np.zeros((NumberParticles,Dimension), np.double)
    PositionNew = np.zeros((NumberParticles,Dimension), np.double)
    # Quantum force
    QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
    QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)

    # seed for rng generator
    seed()
    energy = 0.0
    DeltaE = 0.0
    # accumulators for <dlnpsi>, <dlnpsi * E_L> used in the gradient formula
    EnergyDer = np.zeros((2), np.double)
    DeltaPsi = np.zeros((2), np.double)
    DerivativePsiE = np.zeros((2), np.double)
    #Initial position
    for i in range(NumberParticles):
        for j in range(Dimension):
            PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
    wfold = WaveFunction(PositionOld,alpha,beta)
    QuantumForceOld = QuantumForce(PositionOld,alpha, beta)

    #Loop over MC MCcycles
    for MCcycle in range(NumberMCcycles):
        #Trial position moving one particle at the time
        for i in range(NumberParticles):
            for j in range(Dimension):
                # drift-diffusion proposal: Gaussian step plus quantum-force drift
                PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
                                   QuantumForceOld[i,j]*TimeStep*D
            wfnew = WaveFunction(PositionNew,alpha,beta)
            QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
            # log of the Green's function ratio for the asymmetric proposal
            GreensFunction = 0.0
            for j in range(Dimension):
                GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
                                  (D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
                                  PositionNew[i,j]+PositionOld[i,j])
            GreensFunction = exp(GreensFunction)
            ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
            #Metropolis-Hastings test to see whether we accept the move
            if random() <= ProbabilityRatio:
                for j in range(Dimension):
                    PositionOld[i,j] = PositionNew[i,j]
                    QuantumForceOld[i,j] = QuantumForceNew[i,j]
                wfold = wfnew
        # sample local energy and wave-function derivatives once per cycle
        DeltaE = LocalEnergy(PositionOld,alpha,beta)
        DerPsi = DerivativeWFansatz(PositionOld,alpha,beta)
        DeltaPsi += DerPsi
        energy += DeltaE
        DerivativePsiE += DerPsi*DeltaE

    # We calculate mean values
    energy /= NumberMCcycles
    DerivativePsiE /= NumberMCcycles
    DeltaPsi /= NumberMCcycles
    # dE/dparam = 2*(<dlnpsi*E_L> - <dlnpsi><E_L>)
    EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy)
    return energy, EnergyDer
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# guess for variational parameters
alpha = 0.95
beta = 0.3
# Set up iteration using stochastic gradient method
Energy = 0
EDerivative = np.zeros((2), np.double)
# Learning rate eta, max iterations, need to change to adaptive learning rate
eta = 0.01
MaxIterations = 50
Niterations = 0
# simple fixed-step gradient descent on (alpha, beta)
# NOTE(review): '<=' runs MaxIterations+1 iterations -- presumably intentional
while Niterations <= MaxIterations:
    Energy, EDerivative = EnergyMinimization(alpha,beta)
    alphagradient = EDerivative[0]
    betagradient = EDerivative[1]
    alpha -= eta*alphagradient
    beta -= eta*betagradient
    Niterations += 1
    print(alpha, beta)
# NOTE(review): loop body placement reconstructed from a stripped source;
# the final report line is taken to be outside the loop -- confirm
print(Energy, EDerivative[0], EDerivative[1])
|
CompPhysics/ComputationalPhysics2
|
doc/Programs/ConjugateGradient/python/qdoteminim.py
|
Python
|
cc0-1.0
| 5,318
|
[
"Gaussian"
] |
129a376f503f137e650056167eb88ea994310181022f6a42ace466c8a7eacc41
|
# Copyright 2009-2010 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# This module is for reading and writing FASTQ and QUAL format files as
# SeqRecord objects, and is expected to be used via the Bio.SeqIO API.
"""Bio.SeqIO support for the FASTQ and QUAL file formats.
Note that you are expected to use this code via the Bio.SeqIO interface, as
shown below.
The FASTQ file format is used frequently at the Wellcome Trust Sanger Institute
to bundle a FASTA sequence and its PHRED quality data (integers between 0 and
90). Rather than using a single FASTQ file, often paired FASTA and QUAL files
are used containing the sequence and the quality information separately.
The PHRED software reads DNA sequencing trace files, calls bases, and
assigns a non-negative quality value to each called base using a logged
transformation of the error probability, Q = -10 log10( Pe ), for example::
Pe = 1.0, Q = 0
Pe = 0.1, Q = 10
Pe = 0.01, Q = 20
...
Pe = 0.00000001, Q = 80
Pe = 0.000000001, Q = 90
In typical raw sequence reads, the PHRED quality values will be from 0 to 40.
In the QUAL format these quality values are held as space separated text in
a FASTA like file format. In the FASTQ format, each quality value is encoded
with a single ASCII character using chr(Q+33), meaning zero maps to the
character "!" and for example 80 maps to "q". For the Sanger FASTQ standard
the allowed range of PHRED scores is 0 to 93 inclusive. The sequences and
quality are then stored in pairs in a FASTA like format.
Unfortunately there is no official document describing the FASTQ file format,
and worse, several related but different variants exist. For more details,
please read this open access publication::
The Sanger FASTQ file format for sequences with quality scores, and the
Solexa/Illumina FASTQ variants.
P.J.A.Cock (Biopython), C.J.Fields (BioPerl), N.Goto (BioRuby),
M.L.Heuer (BioJava) and P.M. Rice (EMBOSS).
Nucleic Acids Research 2010 38(6):1767-1771
http://dx.doi.org/10.1093/nar/gkp1137
The good news is that Roche 454 sequencers can output files in the QUAL format,
and sensibly they use PHRED style scores like Sanger. Converting a pair of
FASTA and QUAL files into a Sanger style FASTQ file is easy. To extract QUAL
files from a Roche 454 SFF binary file, use the Roche off instrument command
line tool "sffinfo" with the -q or -qual argument. You can extract a matching
FASTA file using the -s or -seq argument instead.
The bad news is that Solexa/Illumina did things differently - they have their
own scoring system AND their own incompatible versions of the FASTQ format.
Solexa/Illumina quality scores use Q = - 10 log10 ( Pe / (1-Pe) ), which can
be negative. PHRED scores and Solexa scores are NOT interchangeable (but a
reasonable mapping can be achieved between them, and they are approximately
equal for higher quality reads).
Confusingly early Solexa pipelines produced a FASTQ like file but using their
own score mapping and an ASCII offset of 64. To make things worse, for the
Solexa/Illumina pipeline 1.3 onwards, they introduced a third variant of the
FASTQ file format, this time using PHRED scores (which is more consistent) but
with an ASCII offset of 64.
i.e. There are at least THREE different and INCOMPATIBLE variants of the FASTQ
file format: The original Sanger PHRED standard, and two from Solexa/Illumina.
The good news is that as of CASAVA version 1.8, Illumina sequencers will
produce FASTQ files using the standard Sanger encoding.
You are expected to use this module via the Bio.SeqIO functions, with the
following format names:
- "qual" means simple quality files using PHRED scores (e.g. from Roche 454)
- "fastq" means Sanger style FASTQ files using PHRED scores and an ASCII
offset of 33 (e.g. from the NCBI Short Read Archive and Illumina 1.8+).
These can potentially hold PHRED scores from 0 to 93.
- "fastq-sanger" is an alias for "fastq".
- "fastq-solexa" means old Solexa (and also very early Illumina) style FASTQ
files, using Solexa scores with an ASCII offset 64. These can hold Solexa
scores from -5 to 62.
- "fastq-illumina" means newer Illumina 1.3 to 1.7 style FASTQ files, using
PHRED scores but with an ASCII offset 64, allowing PHRED scores from 0
to 62.
We could potentially add support for "qual-solexa" meaning QUAL files which
contain Solexa scores, but thus far there isn't any reason to use such files.
For example, consider the following short FASTQ file::
@EAS54_6_R1_2_1_413_324
CCCTTCTTGTCTTCAGCGTTTCTCC
+
;;3;;;;;;;;;;;;7;;;;;;;88
@EAS54_6_R1_2_1_540_792
TTGGCAGGCCAAGGCCGATGGATCA
+
;;;;;;;;;;;7;;;;;-;;;3;83
@EAS54_6_R1_2_1_443_348
GTTGCTTCTGGCGTGGGTGGGGGGG
+
;;;;;;;;;;;9;7;;.7;393333
This contains three reads of length 25. From the read length these were
probably originally from an early Solexa/Illumina sequencer but this file
follows the Sanger FASTQ convention (PHRED style qualities with an ASCII
offset of 33). This means we can parse this file using Bio.SeqIO using
"fastq" as the format name:
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("Quality/example.fastq", "fastq"):
... print record.id, record.seq
EAS54_6_R1_2_1_413_324 CCCTTCTTGTCTTCAGCGTTTCTCC
EAS54_6_R1_2_1_540_792 TTGGCAGGCCAAGGCCGATGGATCA
EAS54_6_R1_2_1_443_348 GTTGCTTCTGGCGTGGGTGGGGGGG
The qualities are held as a list of integers in each record's annotation:
>>> print record
ID: EAS54_6_R1_2_1_443_348
Name: EAS54_6_R1_2_1_443_348
Description: EAS54_6_R1_2_1_443_348
Number of features: 0
Per letter annotation for: phred_quality
Seq('GTTGCTTCTGGCGTGGGTGGGGGGG', SingleLetterAlphabet())
>>> print record.letter_annotations["phred_quality"]
[26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 24, 26, 22, 26, 26, 13, 22, 26, 18, 24, 18, 18, 18, 18]
You can use the SeqRecord format method to show this in the QUAL format:
>>> print record.format("qual")
>EAS54_6_R1_2_1_443_348
26 26 26 26 26 26 26 26 26 26 26 24 26 22 26 26 13 22 26 18
24 18 18 18 18
<BLANKLINE>
Or go back to the FASTQ format, use "fastq" (or "fastq-sanger"):
>>> print record.format("fastq")
@EAS54_6_R1_2_1_443_348
GTTGCTTCTGGCGTGGGTGGGGGGG
+
;;;;;;;;;;;9;7;;.7;393333
<BLANKLINE>
Or, using the Illumina 1.3+ FASTQ encoding (PHRED values with an ASCII offset
of 64):
>>> print record.format("fastq-illumina")
@EAS54_6_R1_2_1_443_348
GTTGCTTCTGGCGTGGGTGGGGGGG
+
ZZZZZZZZZZZXZVZZMVZRXRRRR
<BLANKLINE>
You can also get Biopython to convert the scores and show a Solexa style
FASTQ file:
>>> print record.format("fastq-solexa")
@EAS54_6_R1_2_1_443_348
GTTGCTTCTGGCGTGGGTGGGGGGG
+
ZZZZZZZZZZZXZVZZMVZRXRRRR
<BLANKLINE>
Notice that this is actually the same output as above using "fastq-illumina"
as the format! The reason for this is all these scores are high enough that
the PHRED and Solexa scores are almost equal. The differences become apparent
for poor quality reads. See the functions solexa_quality_from_phred and
phred_quality_from_solexa for more details.
If you wanted to trim your sequences (perhaps to remove low quality regions,
or to remove a primer sequence), try slicing the SeqRecord objects. e.g.
>>> sub_rec = record[5:15]
>>> print sub_rec
ID: EAS54_6_R1_2_1_443_348
Name: EAS54_6_R1_2_1_443_348
Description: EAS54_6_R1_2_1_443_348
Number of features: 0
Per letter annotation for: phred_quality
Seq('TTCTGGCGTG', SingleLetterAlphabet())
>>> print sub_rec.letter_annotations["phred_quality"]
[26, 26, 26, 26, 26, 26, 24, 26, 22, 26]
>>> print sub_rec.format("fastq")
@EAS54_6_R1_2_1_443_348
TTCTGGCGTG
+
;;;;;;9;7;
<BLANKLINE>
If you wanted to, you could read in this FASTQ file, and save it as a QUAL file:
>>> from Bio import SeqIO
>>> record_iterator = SeqIO.parse("Quality/example.fastq", "fastq")
>>> out_handle = open("Quality/temp.qual", "w")
>>> SeqIO.write(record_iterator, out_handle, "qual")
3
>>> out_handle.close()
You can of course read in a QUAL file, such as the one we just created:
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("Quality/temp.qual", "qual"):
... print record.id, record.seq
EAS54_6_R1_2_1_413_324 ?????????????????????????
EAS54_6_R1_2_1_540_792 ?????????????????????????
EAS54_6_R1_2_1_443_348 ?????????????????????????
Notice that QUAL files don't have a proper sequence present! But the quality
information is there:
>>> print record
ID: EAS54_6_R1_2_1_443_348
Name: EAS54_6_R1_2_1_443_348
Description: EAS54_6_R1_2_1_443_348
Number of features: 0
Per letter annotation for: phred_quality
UnknownSeq(25, alphabet = SingleLetterAlphabet(), character = '?')
>>> print record.letter_annotations["phred_quality"]
[26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 24, 26, 22, 26, 26, 13, 22, 26, 18, 24, 18, 18, 18, 18]
Just to keep things tidy, if you are following this example yourself, you can
delete this temporary file now:
>>> import os
>>> os.remove("Quality/temp.qual")
Sometimes you won't have a FASTQ file, but rather just a pair of FASTA and QUAL
files. Because the Bio.SeqIO system is designed for reading single files, you
would have to read the two in separately and then combine the data. However,
since this is such a common thing to want to do, there is a helper iterator
defined in this module that does this for you - PairedFastaQualIterator.
Alternatively, if you have enough RAM to hold all the records in memory at once,
then a simple dictionary approach would work:
>>> from Bio import SeqIO
>>> reads = SeqIO.to_dict(SeqIO.parse(open("Quality/example.fasta"), "fasta"))
>>> for rec in SeqIO.parse(open("Quality/example.qual"), "qual"):
... reads[rec.id].letter_annotations["phred_quality"]=rec.letter_annotations["phred_quality"]
You can then access any record by its key, and get both the sequence and the
quality scores.
>>> print reads["EAS54_6_R1_2_1_540_792"].format("fastq")
@EAS54_6_R1_2_1_540_792
TTGGCAGGCCAAGGCCGATGGATCA
+
;;;;;;;;;;;7;;;;;-;;;3;83
<BLANKLINE>
It is important that you explicitly tell Bio.SeqIO which FASTQ variant you are
using ("fastq" or "fastq-sanger" for the Sanger standard using PHRED values,
"fastq-solexa" for the original Solexa/Illumina variant, or "fastq-illumina"
for the more recent variant), as this cannot be detected reliably
automatically.
To illustrate this problem, let's consider an artificial example:
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import generic_dna
>>> from Bio.SeqRecord import SeqRecord
>>> test = SeqRecord(Seq("NACGTACGTA", generic_dna), id="Test",
... description="Made up!")
>>> print test.format("fasta")
>Test Made up!
NACGTACGTA
<BLANKLINE>
>>> print test.format("fastq")
Traceback (most recent call last):
...
ValueError: No suitable quality scores found in letter_annotations of SeqRecord (id=Test).
We created a sample SeqRecord, and can show it in FASTA format - but for QUAL
or FASTQ format we need to provide some quality scores. These are held as a
list of integers (one for each base) in the letter_annotations dictionary:
>>> test.letter_annotations["phred_quality"] = [0,1,2,3,4,5,10,20,30,40]
>>> print test.format("qual")
>Test Made up!
0 1 2 3 4 5 10 20 30 40
<BLANKLINE>
>>> print test.format("fastq")
@Test Made up!
NACGTACGTA
+
!"#$%&+5?I
<BLANKLINE>
We can check this FASTQ encoding - the first PHRED quality was zero, and this
mapped to a exclamation mark, while the final score was 40 and this mapped to
the letter "I":
>>> ord('!') - 33
0
>>> ord('I') - 33
40
>>> [ord(letter)-33 for letter in '!"#$%&+5?I']
[0, 1, 2, 3, 4, 5, 10, 20, 30, 40]
Similarly, we could produce an Illumina 1.3 to 1.7 style FASTQ file using PHRED
scores with an offset of 64:
>>> print test.format("fastq-illumina")
@Test Made up!
NACGTACGTA
+
@ABCDEJT^h
<BLANKLINE>
And we can check this too - the first PHRED score was zero, and this mapped to
"@", while the final score was 40 and this mapped to "h":
>>> ord("@") - 64
0
>>> ord("h") - 64
40
>>> [ord(letter)-64 for letter in "@ABCDEJT^h"]
[0, 1, 2, 3, 4, 5, 10, 20, 30, 40]
Notice how different the standard Sanger FASTQ and the Illumina 1.3 to 1.7 style
FASTQ files look for the same data! Then we have the older Solexa/Illumina
format to consider which encodes Solexa scores instead of PHRED scores.
First let's see what Biopython says if we convert the PHRED scores into Solexa
scores (rounding to one decimal place):
>>> for q in [0,1,2,3,4,5,10,20,30,40]:
... print "PHRED %i maps to Solexa %0.1f" % (q, solexa_quality_from_phred(q))
PHRED 0 maps to Solexa -5.0
PHRED 1 maps to Solexa -5.0
PHRED 2 maps to Solexa -2.3
PHRED 3 maps to Solexa -0.0
PHRED 4 maps to Solexa 1.8
PHRED 5 maps to Solexa 3.3
PHRED 10 maps to Solexa 9.5
PHRED 20 maps to Solexa 20.0
PHRED 30 maps to Solexa 30.0
PHRED 40 maps to Solexa 40.0
Now here is the record using the old Solexa style FASTQ file:
>>> print test.format("fastq-solexa")
@Test Made up!
NACGTACGTA
+
;;>@BCJT^h
<BLANKLINE>
Again, this is using an ASCII offset of 64, so we can check the Solexa scores:
>>> [ord(letter)-64 for letter in ";;>@BCJT^h"]
[-5, -5, -2, 0, 2, 3, 10, 20, 30, 40]
This explains why the last few letters of this FASTQ output matched that using
the Illumina 1.3 to 1.7 format - high quality PHRED scores and Solexa scores
are approximately equal.
"""
__docformat__ = "epytext en" #Don't just use plain text in epydoc API pages!
from Bio.Alphabet import single_letter_alphabet
from Bio.Seq import Seq, UnknownSeq
from Bio.SeqRecord import SeqRecord
from Bio.SeqIO.Interfaces import SequentialSequenceWriter
from math import log
import warnings
from Bio import BiopythonWarning, BiopythonParserWarning
# Define score offsets (the ASCII value of the character encoding a quality
# of zero). See the module docstring for the difference between the Sanger
# standard (PHRED scores, offset 33) and the Solexa/Illumina style (offset 64).
SANGER_SCORE_OFFSET = 33  # Sanger FASTQ: quality 0 encoded as "!" (ASCII 33)
SOLEXA_SCORE_OFFSET = 64  # Solexa/Illumina FASTQ: quality 0 encoded as "@" (ASCII 64)
def solexa_quality_from_phred(phred_quality):
    """Convert a PHRED quality (range 0 to about 90) to a Solexa quality.

    Both scales are log transformations of the estimated probability of a
    base call being wrong (a high score means a low probability of error).
    PHRED uses::

        phred_quality = -10*log(error,10)

    while Solexa uses the odds ratio instead::

        solexa_quality = -10*log(error/(1-error),10)

    Substituting one into the other gives::

        solexa_quality = 10*log(10**(phred_quality/10.0) - 1, 10)

    Taken literally this formula would map a PHRED quality of zero to a
    Solexa quality of minus infinity. However, real Solexa files use a
    minimum quality of -5, which (after rounding) matches a random base
    call (error probability 0.75). Following EMBOSS, this function clamps
    the result at -5.0 and also maps a PHRED quality of zero to -5.0.

    Note that for high quality reads PHRED and Solexa scores are numerically
    almost equal; the differences matter only for poor quality reads, where
    PHRED has a minimum of zero but Solexa scores can be negative.

    The return value is a float - it is up to you to round it to the nearest
    integer if appropriate. As a special case, None (used as a missing value
    marker, e.g. Bio.SeqIO gives Ace contig gaps a quality of None) is
    returned unchanged.
    """
    if phred_quality is None:
        #Missing value marker (e.g. Ace contig gaps); pass it through.
        return None
    if phred_quality == 0:
        #The log formula maps zero to minus infinity; use the floor instead
        #as discussed in the docstring.
        return -5.0
    if phred_quality > 0:
        #Clamp at -5, the Solexa score of a random base call (after rounding).
        return max(-5.0, 10*log(10**(phred_quality/10.0) - 1, 10))
    raise ValueError("PHRED qualities must be positive (or zero), not %s" \
                     % repr(phred_quality))
def phred_quality_from_solexa(solexa_quality):
    """Convert a Solexa quality (which can be negative) to a PHRED quality.

    This is the inverse of solexa_quality_from_phred: the Solexa score is
    transformed back into a probability of error and re-expressed on the
    PHRED scale, assuming the error estimates are equivalent. The underlying
    formulas are given in the documentation of the sister function; in this
    direction the operation is::

        phred_quality = 10*log(10**(solexa_quality/10.0) + 1, 10)

    The return value is a float - it is up to you to round it to the nearest
    integer if appropriate.

    A Solexa quality below -5 is not expected; it triggers a BiopythonWarning
    but is still converted via the logarithmic mapping above (giving a value
    between 0 and about 1.19). As a special case, None (used as a missing
    value marker) is returned unchanged.
    """
    if solexa_quality is None:
        #Missing value marker; pass it through.
        return None
    if solexa_quality < -5:
        #Below the expected Solexa floor - warn, but still convert.
        warnings.warn("Solexa quality less than -5 passed, %s" \
                      % repr(solexa_quality), BiopythonWarning)
    odds_term = 10 ** (solexa_quality / 10.0)
    return 10 * log(odds_term + 1, 10)
def _get_phred_quality(record):
    """Extract PHRED qualities from a SeqRecord's letter_annotations (PRIVATE).

    If there are no PHRED qualities, but there are Solexa qualities, those
    are used instead after conversion via phred_quality_from_solexa.

    Raises ValueError when neither kind of quality score is present.
    """
    annotations = record.letter_annotations
    if "phred_quality" in annotations:
        #PHRED scores take priority when both kinds are present.
        return annotations["phred_quality"]
    if "solexa_quality" in annotations:
        #Convert each Solexa score onto the PHRED scale.
        return [phred_quality_from_solexa(q)
                for q in annotations["solexa_quality"]]
    raise ValueError("No suitable quality scores found in "
                     "letter_annotations of SeqRecord (id=%s)." \
                     % record.id)
#The Sanger FASTQ encoding can hold PHRED qualities 0 to 93 (ASCII 33 to
#126, hence the min(126, ...) truncation); the encoding functions below give
#a warning when truncating at 93.
#Pre-cached mapping of integer PHRED scores 0 to 93 onto single characters:
_phred_to_sanger_quality_str = dict((qp, chr(min(126, qp+SANGER_SCORE_OFFSET))) \
    for qp in range(0, 93+1))
#Pre-cached mapping of integer Solexa scores -5 to 93 (converted to PHRED)
#onto single characters:
_solexa_to_sanger_quality_str = dict( \
    (qs, chr(min(126, int(round(phred_quality_from_solexa(qs)))+SANGER_SCORE_OFFSET))) \
    for qs in range(-5, 93+1))
def _get_sanger_quality_str(record):
    """Returns a Sanger FASTQ encoded quality string (PRIVATE).

    >>> from Bio.Seq import Seq
    >>> from Bio.SeqRecord import SeqRecord
    >>> r = SeqRecord(Seq("ACGTAN"), id="Test",
    ...               letter_annotations = {"phred_quality":[50,40,30,20,10,0]})
    >>> _get_sanger_quality_str(r)
    'SI?5+!'

    If as in the above example (or indeed a SeqRecord parser with Bio.SeqIO),
    the PHRED qualities are integers, this function is able to use a very fast
    pre-cached mapping. However, if they are floats which differ slightly, then
    it has to do the appropriate rounding - which is slower:

    >>> r2 = SeqRecord(Seq("ACGTAN"), id="Test2",
    ...                letter_annotations = {"phred_quality":[50.0,40.05,29.99,20,9.55,0.01]})
    >>> _get_sanger_quality_str(r2)
    'SI?5+!'

    If your scores include a None value, this raises an exception:

    >>> r3 = SeqRecord(Seq("ACGTAN"), id="Test3",
    ...                letter_annotations = {"phred_quality":[50,40,30,20,10,None]})
    >>> _get_sanger_quality_str(r3)
    Traceback (most recent call last):
    ...
    TypeError: A quality value of None was found

    If (strangely) your record has both PHRED and Solexa scores, then the PHRED
    scores are used in preference:

    >>> r4 = SeqRecord(Seq("ACGTAN"), id="Test4",
    ...                letter_annotations = {"phred_quality":[50,40,30,20,10,0],
    ...                                      "solexa_quality":[-5,-4,0,None,0,40]})
    >>> _get_sanger_quality_str(r4)
    'SI?5+!'

    If there are no PHRED scores, but there are Solexa scores, these are used
    instead (after the appropriate conversion):

    >>> r5 = SeqRecord(Seq("ACGTAN"), id="Test5",
    ...                letter_annotations = {"solexa_quality":[40,30,20,10,0,-5]})
    >>> _get_sanger_quality_str(r5)
    'I?5+$"'

    Again, integer Solexa scores can be looked up in a pre-cached mapping making
    this very fast. You can still use approximate floating point scores:

    >>> r6 = SeqRecord(Seq("ACGTAN"), id="Test6",
    ...                letter_annotations = {"solexa_quality":[40.1,29.7,20.01,10,0.0,-4.9]})
    >>> _get_sanger_quality_str(r6)
    'I?5+$"'

    Notice that due to the limited range of printable ASCII characters, a
    PHRED quality of 93 is the maximum that can be held in an Illumina FASTQ
    file (using ASCII 126, the tilde). This function will issue a warning
    in this situation.
    """
    #TODO - This function works and is fast, but it is also ugly
    #and there is considerable repetition of code for the other
    #two FASTQ variants.
    try:
        #These take priority (in case both Solexa and PHRED scores found)
        qualities = record.letter_annotations["phred_quality"]
    except KeyError:
        #Fall back on solexa scores...
        pass
    else:
        #Try and use the precomputed mapping:
        try:
            return "".join([_phred_to_sanger_quality_str[qp] \
                            for qp in qualities])
        except KeyError:
            #Could be a float, or a None in the list, or a high value.
            pass
        if None in qualities:
            raise TypeError("A quality value of None was found")
        if max(qualities) >= 93.5:
            warnings.warn("Data loss - max PHRED quality 93 in Sanger FASTQ",
                          BiopythonWarning)
        #This will apply the truncation at 93, giving max ASCII 126
        return "".join([chr(min(126, int(round(qp))+SANGER_SCORE_OFFSET)) \
                        for qp in qualities])
    #Fall back on the Solexa scores...
    try:
        qualities = record.letter_annotations["solexa_quality"]
    except KeyError:
        raise ValueError("No suitable quality scores found in "
                         "letter_annotations of SeqRecord (id=%s)." \
                         % record.id)
    #Try and use the precomputed mapping:
    try:
        return "".join([_solexa_to_sanger_quality_str[qs] \
                        for qs in qualities])
    except KeyError:
        #Something odd like a float or None, or a value outside the cache.
        pass
    if None in qualities:
        raise TypeError("A quality value of None was found")
    #Must do this the slow way, first converting the Solexa scores into
    #PHRED scores:
    if max(qualities) >= 93.5:
        warnings.warn("Data loss - max PHRED quality 93 in Sanger FASTQ",
                      BiopythonWarning)
    #This will apply the truncation at 93, giving max ASCII 126
    return "".join([chr(min(126, int(round(phred_quality_from_solexa(qs)))+SANGER_SCORE_OFFSET)) \
                    for qs in qualities])
#The Illumina 1.3+ FASTQ encoding can only hold qualities 0 to 62 (ASCII 64
#to 126); the encoding functions below give a warning when truncating at 62.
assert 62+SOLEXA_SCORE_OFFSET == 126
#Pre-cached mapping of integer PHRED scores 0 to 62 onto single characters:
_phred_to_illumina_quality_str = dict((qp, chr(qp+SOLEXA_SCORE_OFFSET)) \
    for qp in range(0, 62+1))
#Pre-cached mapping of integer Solexa scores -5 to 62 (converted to PHRED)
#onto single characters:
_solexa_to_illumina_quality_str = dict( \
    (qs, chr(int(round(phred_quality_from_solexa(qs)))+SOLEXA_SCORE_OFFSET)) \
    for qs in range(-5, 62+1))
def _get_illumina_quality_str(record):
    """Returns an Illumina 1.3 to 1.7 FASTQ encoded quality string (PRIVATE).

    PHRED qualities are taken from the record's letter_annotations (falling
    back on Solexa qualities converted to PHRED when no PHRED scores are
    present) and encoded as ASCII characters with an offset of 64.

    Notice that due to the limited range of printable ASCII characters, a
    PHRED quality of 62 is the maximum that can be held in an Illumina FASTQ
    file (using ASCII 126, the tilde). This function will issue a warning
    in this situation.
    """
    #TODO - This function works and is fast, but it is also ugly
    #and there is considerable repetition of code for the other
    #two FASTQ variants.
    try:
        #These take priority (in case both Solexa and PHRED scores found)
        qualities = record.letter_annotations["phred_quality"]
    except KeyError:
        #Fall back on solexa scores...
        pass
    else:
        #Try and use the precomputed mapping:
        try:
            return "".join([_phred_to_illumina_quality_str[qp] \
                            for qp in qualities])
        except KeyError:
            #Could be a float, or a None in the list, or a high value.
            pass
        if None in qualities:
            raise TypeError("A quality value of None was found")
        if max(qualities) >= 62.5:
            warnings.warn("Data loss - max PHRED quality 62 in Illumina FASTQ",
                          BiopythonWarning)
        #This will apply the truncation at 62, giving max ASCII 126
        return "".join([chr(min(126, int(round(qp))+SOLEXA_SCORE_OFFSET)) \
                        for qp in qualities])
    #Fall back on the Solexa scores...
    try:
        qualities = record.letter_annotations["solexa_quality"]
    except KeyError:
        raise ValueError("No suitable quality scores found in "
                         "letter_annotations of SeqRecord (id=%s)." \
                         % record.id)
    #Try and use the precomputed mapping:
    try:
        return "".join([_solexa_to_illumina_quality_str[qs] \
                        for qs in qualities])
    except KeyError:
        #Something odd like a float or None, or a value outside the cache.
        pass
    if None in qualities:
        raise TypeError("A quality value of None was found")
    #Must do this the slow way, first converting the Solexa scores into
    #PHRED scores:
    if max(qualities) >= 62.5:
        warnings.warn("Data loss - max PHRED quality 62 in Illumina FASTQ",
                      BiopythonWarning)
    #This will apply the truncation at 62, giving max ASCII 126
    return "".join([chr(min(126, int(round(phred_quality_from_solexa(qs)))+SOLEXA_SCORE_OFFSET)) \
                    for qs in qualities])
#The Solexa FASTQ encoding can only hold qualities -5 to 62 (ASCII 59 to
#126); the encoding functions below give a warning when truncating at 62.
assert 62+SOLEXA_SCORE_OFFSET == 126
#Pre-cached mapping of integer Solexa scores -5 to 62 onto single characters:
_solexa_to_solexa_quality_str = dict((qs, chr(min(126, qs+SOLEXA_SCORE_OFFSET))) \
    for qs in range(-5, 62+1))
#Pre-cached mapping of integer PHRED scores 0 to 62 (converted to Solexa)
#onto single characters:
_phred_to_solexa_quality_str = dict(\
    (qp, chr(min(126, int(round(solexa_quality_from_phred(qp)))+SOLEXA_SCORE_OFFSET))) \
    for qp in range(0, 62+1))
def _get_solexa_quality_str(record):
    """Returns a Solexa FASTQ encoded quality string (PRIVATE).

    Solexa qualities are taken from the record's letter_annotations (falling
    back on PHRED qualities converted to the Solexa scale when no Solexa
    scores are present) and encoded as ASCII characters with an offset of 64.

    Notice that due to the limited range of printable ASCII characters, a
    Solexa quality of 62 is the maximum that can be held in a Solexa FASTQ
    file (using ASCII 126, the tilde). This function will issue a warning
    in this situation.
    """
    #TODO - This function works and is fast, but it is also ugly
    #and there is considerable repetition of code for the other
    #two FASTQ variants.
    try:
        #These take priority (in case both Solexa and PHRED scores found)
        qualities = record.letter_annotations["solexa_quality"]
    except KeyError:
        #Fall back on PHRED scores...
        pass
    else:
        #Try and use the precomputed mapping:
        try:
            return "".join([_solexa_to_solexa_quality_str[qs] \
                            for qs in qualities])
        except KeyError:
            #Could be a float, or a None in the list, or a high value.
            pass
        if None in qualities:
            raise TypeError("A quality value of None was found")
        if max(qualities) >= 62.5:
            warnings.warn("Data loss - max Solexa quality 62 in Solexa FASTQ",
                          BiopythonWarning)
        #This will apply the truncation at 62, giving max ASCII 126
        return "".join([chr(min(126, int(round(qs))+SOLEXA_SCORE_OFFSET)) \
                        for qs in qualities])
    #Fall back on the PHRED scores...
    try:
        qualities = record.letter_annotations["phred_quality"]
    except KeyError:
        raise ValueError("No suitable quality scores found in "
                         "letter_annotations of SeqRecord (id=%s)." \
                         % record.id)
    #Try and use the precomputed mapping:
    try:
        return "".join([_phred_to_solexa_quality_str[qp] \
                        for qp in qualities])
    except KeyError:
        #Something odd like a float or None,
        #or too big to be in the cache
        pass
    if None in qualities:
        raise TypeError("A quality value of None was found")
    #Must do this the slow way, first converting the PHRED scores into
    #Solexa scores:
    if max(qualities) >= 62.5:
        warnings.warn("Data loss - max Solexa quality 62 in Solexa FASTQ",
                      BiopythonWarning)
    return "".join([chr(min(126,
                            int(round(solexa_quality_from_phred(qp))) + \
                            SOLEXA_SCORE_OFFSET)) \
                    for qp in qualities])
#TODO - Default to nucleotide or even DNA?
def FastqGeneralIterator(handle):
    """Iterate over Fastq records as string tuples (not as SeqRecord objects).

    - handle - input file handle (opened in text mode).

    Yields (title, sequence, quality) tuples of strings, where title is the
    "@" line with the leading "@" removed, and any line breaks and trailing
    whitespace have been removed from the sequence and quality strings.

    This code does not try to interpret the quality string numerically. The
    SeqRecord based FASTQ iterators call this function internally, and then
    turn the strings into SeqRecord objects, mapping the quality string into
    a list of numerical scores. If you want to do a custom quality mapping,
    then you might consider calling this function directly.

    For parsing FASTQ files, the title string from the "@" line at the start
    of each record can optionally be omitted on the "+" lines. If it is
    repeated, it must be identical. The sequence string and the quality
    string can optionally be split over multiple lines, although several
    sources discourage this.

    WARNING - Because the "@" character can appear in the quality string
    (and "+" too), a line starting with "@" is NOT a reliable marker for the
    start of a new record on its own. This parser copes with the ambiguity
    by keeping track of the length of the sequence, which dictates the
    expected length of the quality string.

    Raises ValueError for malformed input: a record not starting with "@",
    a mismatched title on the "+" line, whitespace inside the sequence,
    sequence/quality length disagreement, or truncation at the end of file.

    Note some sources state the quality string should start with "!"; this
    restrictive rule is not widely observed, so it is ignored here.
    """
    #We need to call handle.readline() at least four times per record,
    #so we'll save a property look up each time:
    handle_readline = handle.readline
    #Skip any text before the first record (e.g. blank lines, comments?)
    while True:
        line = handle_readline()
        if not line:
            return #Premature end of file, or just empty?
        if line[0] == "@":
            break
        if isinstance(line[0], int):
            #Indexing a bytes string gives an integer - text mode required.
            raise ValueError("Is this handle in binary mode not text mode?")
    while line:
        if line[0] != "@":
            raise ValueError("Records in Fastq files should start with '@' character")
        title_line = line[1:].rstrip()
        #Will now be at least one line of sequence data - in most FASTQ files
        #just one line! We therefore use string concatenation (if needed)
        #rather than the "".join(...) trick just in case it is multiline:
        seq_string = handle_readline().rstrip()
        #There may now be more sequence lines, or the "+" quality marker line:
        while True:
            line = handle_readline()
            if not line:
                raise ValueError("End of file without quality information.")
            if line[0] == "+":
                #The title here is optional, but if present must match!
                second_title = line[1:].rstrip()
                if second_title and second_title != title_line:
                    raise ValueError("Sequence and quality captions differ.")
                break
            seq_string += line.rstrip() #removes trailing newlines
        #This is going to slow things down a little, but assuming
        #this isn't allowed we should try and catch it here:
        if " " in seq_string or "\t" in seq_string:
            raise ValueError("Whitespace is not allowed in the sequence.")
        seq_len = len(seq_string)
        #Will now be at least one line of quality data...
        quality_string = handle_readline().rstrip()
        #There may now be more quality data, or another sequence, or EOF
        while True:
            line = handle_readline()
            if not line:
                break #end of file
            if line[0] == "@":
                #This COULD be the start of a new sequence. However, it MAY just
                #be a line of quality data which starts with a "@" character. We
                #should be able to check this by looking at the sequence length
                #and the amount of quality data found so far.
                if len(quality_string) >= seq_len:
                    #We expect it to be equal if this is the start of a new record.
                    #If the quality data is longer, we'll raise an error below.
                    break
                #Continue - its just some (more) quality data.
            quality_string += line.rstrip()
        if seq_len != len(quality_string):
            raise ValueError("Lengths of sequence and quality values differs "
                             " for %s (%i and %i)." \
                             % (title_line, seq_len, len(quality_string)))
        #Return the record and then continue...
        yield (title_line, seq_string, quality_string)
    #Deliberately no "raise StopIteration" here - under PEP 479 (Python 3.7+)
    #raising StopIteration inside a generator is converted into a
    #RuntimeError. Simply falling off the end of the generator function
    #signals exhaustion correctly.
#This is a generator function!
def FastqPhredIterator(handle, alphabet = single_letter_alphabet, title2ids = None):
    """Generator function to iterate over FASTQ records (as SeqRecord objects).

    - handle - input file
    - alphabet - optional alphabet
    - title2ids - A function that, when given the title line from the FASTQ
                  file (without the beginning >), will return the id, name and
                  description (in that order) for the record as a tuple of
                  strings. If this is not given, then the entire title line
                  will be used as the description, and the first word as the
                  id and name.

    Note that use of title2ids matches that of Bio.SeqIO.FastaIO.

    For each sequence in a (Sanger style) FASTQ file there is a matching string
    encoding the PHRED qualities (integers between 0 and about 90) using ASCII
    values with an offset of 33.

    For example, consider a file containing three short reads::

        @EAS54_6_R1_2_1_413_324
        CCCTTCTTGTCTTCAGCGTTTCTCC
        +
        ;;3;;;;;;;;;;;;7;;;;;;;88
        @EAS54_6_R1_2_1_540_792
        TTGGCAGGCCAAGGCCGATGGATCA
        +
        ;;;;;;;;;;;7;;;;;-;;;3;83
        @EAS54_6_R1_2_1_443_348
        GTTGCTTCTGGCGTGGGTGGGGGGG
        +
        ;;;;;;;;;;;9;7;;.7;393333

    For each sequence (e.g. "CCCTTCTTGTCTTCAGCGTTTCTCC") there is a matching
    string encoding the PHRED qualities using ASCII values with an offset of
    33 (e.g. ";;3;;;;;;;;;;;;7;;;;;;;88").

    Using this module directly you might run:

    >>> handle = open("Quality/example.fastq", "rU")
    >>> for record in FastqPhredIterator(handle):
    ...     print record.id, record.seq
    EAS54_6_R1_2_1_413_324 CCCTTCTTGTCTTCAGCGTTTCTCC
    EAS54_6_R1_2_1_540_792 TTGGCAGGCCAAGGCCGATGGATCA
    EAS54_6_R1_2_1_443_348 GTTGCTTCTGGCGTGGGTGGGGGGG
    >>> handle.close()

    Typically however, you would call this via Bio.SeqIO instead with "fastq"
    (or "fastq-sanger") as the format:

    >>> from Bio import SeqIO
    >>> handle = open("Quality/example.fastq", "rU")
    >>> for record in SeqIO.parse(handle, "fastq"):
    ...     print record.id, record.seq
    EAS54_6_R1_2_1_413_324 CCCTTCTTGTCTTCAGCGTTTCTCC
    EAS54_6_R1_2_1_540_792 TTGGCAGGCCAAGGCCGATGGATCA
    EAS54_6_R1_2_1_443_348 GTTGCTTCTGGCGTGGGTGGGGGGG
    >>> handle.close()

    If you want to look at the qualities, they are recorded in each record's
    per-letter-annotation dictionary as a simple list of integers:

    >>> print record.letter_annotations["phred_quality"]
    [26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 24, 26, 22, 26, 26, 13, 22, 26, 18, 24, 18, 18, 18, 18]
    """
    assert SANGER_SCORE_OFFSET == ord("!")
    #Originally, I used a list expression for each record:
    #
    # qualities = [ord(letter)-SANGER_SCORE_OFFSET for letter in quality_string]
    #
    #Precomputing is faster, perhaps partly by avoiding the subtractions.
    q_mapping = dict()
    for letter in range(0, 255):
        q_mapping[chr(letter)] = letter-SANGER_SCORE_OFFSET
    for title_line, seq_string, quality_string in FastqGeneralIterator(handle):
        if title2ids:
            id, name, descr = title2ids(title_line)
        else:
            descr = title_line
            id = descr.split()[0]
            name = id
        record = SeqRecord(Seq(seq_string, alphabet),
                           id=id, name=name, description=descr)
        #Valid Sanger PHRED qualities are 0 to 93 inclusive (ASCII 33 to 126).
        qualities = [q_mapping[letter] for letter in quality_string]
        if qualities and (min(qualities) < 0 or max(qualities) > 93):
            raise ValueError("Invalid character in quality string")
        #For speed, will now use a dirty trick to speed up assigning the
        #qualities. We do this to bypass the length check imposed by the
        #per-letter-annotations restricted dict (as this has already been
        #checked by FastqGeneralIterator). This is equivalent to:
        #record.letter_annotations["phred_quality"] = qualities
        dict.__setitem__(record._per_letter_annotations,
                         "phred_quality", qualities)
        yield record
#This is a generator function!
def FastqSolexaIterator(handle, alphabet = single_letter_alphabet, title2ids = None):
r"""Parsing old Solexa/Illumina FASTQ like files (which differ in the quality mapping).
The optional arguments are the same as those for the FastqPhredIterator.
For each sequence in Solexa/Illumina FASTQ files there is a matching string
encoding the Solexa integer qualities using ASCII values with an offset
of 64. Solexa scores are scaled differently to PHRED scores, and Biopython
will NOT perform any automatic conversion when loading.
NOTE - This file format is used by the OLD versions of the Solexa/Illumina
pipeline. See also the FastqIlluminaIterator function for the NEW version.
For example, consider a file containing these five records::
@SLXA-B3_649_FC8437_R1_1_1_610_79
GATGTGCAATACCTTTGTAGAGGAA
+SLXA-B3_649_FC8437_R1_1_1_610_79
YYYYYYYYYYYYYYYYYYWYWYYSU
@SLXA-B3_649_FC8437_R1_1_1_397_389
GGTTTGAGAAAGAGAAATGAGATAA
+SLXA-B3_649_FC8437_R1_1_1_397_389
YYYYYYYYYWYYYYWWYYYWYWYWW
@SLXA-B3_649_FC8437_R1_1_1_850_123
GAGGGTGTTGATCATGATGATGGCG
+SLXA-B3_649_FC8437_R1_1_1_850_123
YYYYYYYYYYYYYWYYWYYSYYYSY
@SLXA-B3_649_FC8437_R1_1_1_362_549
GGAAACAAAGTTTTTCTCAACATAG
+SLXA-B3_649_FC8437_R1_1_1_362_549
YYYYYYYYYYYYYYYYYYWWWWYWY
@SLXA-B3_649_FC8437_R1_1_1_183_714
GTATTATTTAATGGCATACACTCAA
+SLXA-B3_649_FC8437_R1_1_1_183_714
YYYYYYYYYYWYYYYWYWWUWWWQQ
Using this module directly you might run:
>>> handle = open("Quality/solexa_example.fastq", "rU")
>>> for record in FastqSolexaIterator(handle):
... print record.id, record.seq
SLXA-B3_649_FC8437_R1_1_1_610_79 GATGTGCAATACCTTTGTAGAGGAA
SLXA-B3_649_FC8437_R1_1_1_397_389 GGTTTGAGAAAGAGAAATGAGATAA
SLXA-B3_649_FC8437_R1_1_1_850_123 GAGGGTGTTGATCATGATGATGGCG
SLXA-B3_649_FC8437_R1_1_1_362_549 GGAAACAAAGTTTTTCTCAACATAG
SLXA-B3_649_FC8437_R1_1_1_183_714 GTATTATTTAATGGCATACACTCAA
>>> handle.close()
Typically however, you would call this via Bio.SeqIO instead with
"fastq-solexa" as the format:
>>> from Bio import SeqIO
>>> handle = open("Quality/solexa_example.fastq", "rU")
>>> for record in SeqIO.parse(handle, "fastq-solexa"):
... print record.id, record.seq
SLXA-B3_649_FC8437_R1_1_1_610_79 GATGTGCAATACCTTTGTAGAGGAA
SLXA-B3_649_FC8437_R1_1_1_397_389 GGTTTGAGAAAGAGAAATGAGATAA
SLXA-B3_649_FC8437_R1_1_1_850_123 GAGGGTGTTGATCATGATGATGGCG
SLXA-B3_649_FC8437_R1_1_1_362_549 GGAAACAAAGTTTTTCTCAACATAG
SLXA-B3_649_FC8437_R1_1_1_183_714 GTATTATTTAATGGCATACACTCAA
>>> handle.close()
If you want to look at the qualities, they are recorded in each record's
per-letter-annotation dictionary as a simple list of integers:
>>> print record.letter_annotations["solexa_quality"]
[25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 23, 25, 25, 25, 25, 23, 25, 23, 23, 21, 23, 23, 23, 17, 17]
These scores aren't very good, but they are high enough that they map
almost exactly onto PHRED scores:
>>> print "%0.2f" % phred_quality_from_solexa(25)
25.01
Let's look at faked example read which is even worse, where there are
more noticeable differences between the Solexa and PHRED scores::
@slxa_0001_1_0001_01
ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
+slxa_0001_1_0001_01
hgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;
Again, you would typically use Bio.SeqIO to read this file in (rather than
calling the Bio.SeqIO.QualityIO module directly). Most FASTQ files will
contain thousands of reads, so you would normally use Bio.SeqIO.parse()
as shown above. This example has only one entry, so instead we can
use the Bio.SeqIO.read() function:
>>> from Bio import SeqIO
>>> handle = open("Quality/solexa_faked.fastq", "rU")
>>> record = SeqIO.read(handle, "fastq-solexa")
>>> handle.close()
>>> print record.id, record.seq
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print record.letter_annotations["solexa_quality"]
[40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
These quality scores are so low that when converted from the Solexa scheme
into PHRED scores they look quite different:
>>> print "%0.2f" % phred_quality_from_solexa(-1)
2.54
>>> print "%0.2f" % phred_quality_from_solexa(-5)
1.19
Note you can use the Bio.SeqIO.write() function or the SeqRecord's format
method to output the record(s):
>>> print record.format("fastq-solexa")
@slxa_0001_1_0001_01
ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
+
hgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;
<BLANKLINE>
Note this output is slightly different from the input file as Biopython
has left out the optional repetition of the sequence identifier on the "+"
line. If you want the to use PHRED scores, use "fastq" or "qual" as the
output format instead, and Biopython will do the conversion for you:
>>> print record.format("fastq")
@slxa_0001_1_0001_01
ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
+
IHGFEDCBA@?>=<;:9876543210/.-,++*)('&&%%$$##""
<BLANKLINE>
>>> print record.format("qual")
>slxa_0001_1_0001_01
40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21
20 19 18 17 16 15 14 13 12 11 10 10 9 8 7 6 5 5 4 4 3 3 2 2
1 1
<BLANKLINE>
As shown above, the poor quality Solexa reads have been mapped to the
equivalent PHRED score (e.g. -5 to 1 as shown earlier).
"""
q_mapping = dict()
for letter in range(0, 255):
q_mapping[chr(letter)] = letter-SOLEXA_SCORE_OFFSET
for title_line, seq_string, quality_string in FastqGeneralIterator(handle):
if title2ids:
id, name, descr = title_line
else:
descr = title_line
id = descr.split()[0]
name = id
record = SeqRecord(Seq(seq_string, alphabet),
id=id, name=name, description=descr)
qualities = [q_mapping[letter] for letter in quality_string]
#DO NOT convert these into PHRED qualities automatically!
if qualities and (min(qualities) < -5 or max(qualities)>62):
raise ValueError("Invalid character in quality string")
#Dirty trick to speed up this line:
#record.letter_annotations["solexa_quality"] = qualities
dict.__setitem__(record._per_letter_annotations,
"solexa_quality", qualities)
yield record
#This is a generator function!
def FastqIlluminaIterator(handle, alphabet = single_letter_alphabet, title2ids = None):
    """Parse Illumina 1.3 to 1.7 FASTQ like files (which differ in the quality mapping).

    The optional arguments are the same as those for the FastqPhredIterator.

    For each sequence in Illumina 1.3+ FASTQ files there is a matching string
    encoding PHRED integer qualities using ASCII values with an offset of 64.

    >>> from Bio import SeqIO
    >>> record = SeqIO.read(open("Quality/illumina_faked.fastq"), "fastq-illumina")
    >>> print record.id, record.seq
    Test ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTN
    >>> max(record.letter_annotations["phred_quality"])
    40
    >>> min(record.letter_annotations["phred_quality"])
    0

    NOTE - Older versions of the Solexa/Illumina pipeline encoded Solexa scores
    with an ASCII offset of 64. They are approximately equal but only for high
    quality reads. If you have an old Solexa/Illumina file with negative
    Solexa scores, and try and read this as an Illumina 1.3+ file it will fail:

    >>> record2 = SeqIO.read(open("Quality/solexa_faked.fastq"), "fastq-illumina")
    Traceback (most recent call last):
    ...
    ValueError: Invalid character in quality string

    NOTE - True Sanger style FASTQ files use PHRED scores with an offset of 33.
    """
    #Precompute the ASCII character -> PHRED quality lookup table.
    #Illumina 1.3+ uses an ASCII offset of 64, the same numeric value as
    #the Solexa offset constant used here.
    mapping = dict((chr(ascii_value), ascii_value - SOLEXA_SCORE_OFFSET)
                   for ascii_value in range(0, 255))
    for title_line, seq_string, quality_string in FastqGeneralIterator(handle):
        if title2ids:
            id, name, descr = title2ids(title_line)
        else:
            descr = title_line
            id = descr.split()[0]
            name = id
        qualities = [mapping[letter] for letter in quality_string]
        #PHRED scores in this encoding must be 0..62 (ASCII 64..126)
        if qualities:
            if min(qualities) < 0 or max(qualities) > 62:
                raise ValueError("Invalid character in quality string")
        record = SeqRecord(Seq(seq_string, alphabet),
                           id=id, name=name, description=descr)
        #Dirty trick to speed up this line:
        #record.letter_annotations["phred_quality"] = qualities
        dict.__setitem__(record._per_letter_annotations,
                         "phred_quality", qualities)
        yield record
def QualPhredIterator(handle, alphabet = single_letter_alphabet, title2ids = None):
    """For QUAL files which include PHRED quality scores, but no sequence.

    For example, consider this short QUAL file::

        >EAS54_6_R1_2_1_413_324
        26 26 18 26 26 26 26 26 26 26 26 26 26 26 26 22 26 26 26 26
        26 26 26 23 23
        >EAS54_6_R1_2_1_540_792
        26 26 26 26 26 26 26 26 26 26 26 22 26 26 26 26 26 12 26 26
        26 18 26 23 18
        >EAS54_6_R1_2_1_443_348
        26 26 26 26 26 26 26 26 26 26 26 24 26 22 26 26 13 22 26 18
        24 18 18 18 18

    Using this module directly you might run:

    >>> handle = open("Quality/example.qual", "rU")
    >>> for record in QualPhredIterator(handle):
    ...     print record.id, record.seq
    EAS54_6_R1_2_1_413_324 ?????????????????????????
    EAS54_6_R1_2_1_540_792 ?????????????????????????
    EAS54_6_R1_2_1_443_348 ?????????????????????????
    >>> handle.close()

    Typically however, you would call this via Bio.SeqIO instead with "qual"
    as the format:

    >>> from Bio import SeqIO
    >>> handle = open("Quality/example.qual", "rU")
    >>> for record in SeqIO.parse(handle, "qual"):
    ...     print record.id, record.seq
    EAS54_6_R1_2_1_413_324 ?????????????????????????
    EAS54_6_R1_2_1_540_792 ?????????????????????????
    EAS54_6_R1_2_1_443_348 ?????????????????????????
    >>> handle.close()

    Because QUAL files don't contain the sequence string itself, the seq
    property is set to an UnknownSeq object. As no alphabet was given, this
    has defaulted to a generic single letter alphabet and the character "?"
    used.

    By specifying a nucleotide alphabet, "N" is used instead:

    >>> from Bio import SeqIO
    >>> from Bio.Alphabet import generic_dna
    >>> handle = open("Quality/example.qual", "rU")
    >>> for record in SeqIO.parse(handle, "qual", alphabet=generic_dna):
    ...     print record.id, record.seq
    EAS54_6_R1_2_1_413_324 NNNNNNNNNNNNNNNNNNNNNNNNN
    EAS54_6_R1_2_1_540_792 NNNNNNNNNNNNNNNNNNNNNNNNN
    EAS54_6_R1_2_1_443_348 NNNNNNNNNNNNNNNNNNNNNNNNN
    >>> handle.close()

    However, the quality scores themselves are available as a list of integers
    in each record's per-letter-annotation:

    >>> print record.letter_annotations["phred_quality"]
    [26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 24, 26, 22, 26, 26, 13, 22, 26, 18, 24, 18, 18, 18, 18]

    You can still slice one of these SeqRecord objects with an UnknownSeq:

    >>> sub_record = record[5:10]
    >>> print sub_record.id, sub_record.letter_annotations["phred_quality"]
    EAS54_6_R1_2_1_443_348 [26, 26, 26, 26, 26]

    As of Biopython 1.59, this parser will accept files with negative quality
    scores but will replace them with the lowest possible PHRED score of zero.
    This will trigger a warning, previously it raised a ValueError exception.
    """
    #Skip any text before the first record (e.g. blank lines, comments)
    while True:
        line = handle.readline()
        if line == "" : return #Premature end of file, or just empty?
        if line[0] == ">":
            break

    #Main parsing loop; on entry 'line' always holds a ">" title line.
    while True:
        if line[0] != ">":
            raise ValueError("Records in Fasta files should start with '>' character")
        if title2ids:
            #Caller supplied a custom parser for the title line
            id, name, descr = title2ids(line[1:].rstrip())
        else:
            descr = line[1:].rstrip()
            id = descr.split()[0]
            name = id

        #Gather the whitespace separated integer scores, which may span
        #several lines, until the next ">" title line or end of file.
        qualities = []
        line = handle.readline()
        while True:
            if not line : break
            if line[0] == ">": break
            qualities.extend([int(word) for word in line.split()])
            line = handle.readline()

        if qualities and min(qualities) < 0:
            #Clamp negative scores to PHRED zero rather than failing
            #(behaviour since Biopython 1.59, see docstring above).
            warnings.warn(("Negative quality score %i found, " + \
                           "substituting PHRED zero instead.") \
                          % min(qualities), BiopythonParserWarning)
            qualities = [max(0,q) for q in qualities]

        #Return the record and then continue...
        record = SeqRecord(UnknownSeq(len(qualities), alphabet),
                           id = id, name = name, description = descr)
        #Dirty trick to speed up this line:
        #record.letter_annotations["phred_quality"] = qualities
        dict.__setitem__(record._per_letter_annotations,
                         "phred_quality", qualities)
        yield record

        if not line : return #StopIteration at end of file
    assert False, "Should not reach this line"
class FastqPhredWriter(SequentialSequenceWriter):
    """Class to write standard FASTQ format files (using PHRED quality scores).

    Although you can use this class directly, you are strongly encouraged
    to use the Bio.SeqIO.write() function instead via the format name "fastq"
    or the alias "fastq-sanger". For example, this code reads in a standard
    Sanger style FASTQ file (using PHRED scores) and re-saves it as another
    Sanger style FASTQ file:

    >>> from Bio import SeqIO
    >>> record_iterator = SeqIO.parse(open("Quality/example.fastq"), "fastq")
    >>> out_handle = open("Quality/temp.fastq", "w")
    >>> SeqIO.write(record_iterator, out_handle, "fastq")
    3
    >>> out_handle.close()

    You might want to do this if the original file included extra line breaks,
    which while valid may not be supported by all tools. The output file from
    Biopython will have each sequence on a single line, and each quality
    string on a single line (which is considered desirable for maximum
    compatibility).

    In this next example, an old style Solexa/Illumina FASTQ file (using Solexa
    quality scores) is converted into a standard Sanger style FASTQ file using
    PHRED qualities:

    >>> from Bio import SeqIO
    >>> record_iterator = SeqIO.parse(open("Quality/solexa_example.fastq"), "fastq-solexa")
    >>> out_handle = open("Quality/temp.fastq", "w")
    >>> SeqIO.write(record_iterator, out_handle, "fastq")
    5
    >>> out_handle.close()

    This code is also called if you use the .format("fastq") method of a
    SeqRecord, or .format("fastq-sanger") if you prefer that alias.

    Note that Sanger FASTQ files have an upper limit of PHRED quality 93, which is
    encoded as ASCII 126, the tilde. If your quality scores are truncated to fit, a
    warning is issued.

    P.S. To avoid cluttering up your working directory, you can delete this
    temporary file now:

    >>> import os
    >>> os.remove("Quality/temp.fastq")
    """
    assert SANGER_SCORE_OFFSET == ord("!")

    def write_record(self, record):
        """Write a single FASTQ record to the file."""
        assert self._header_written
        assert not self._footer_written
        self._record_written = True

        #TODO - Is an empty sequence allowed in FASTQ format?
        if record.seq is None:
            raise ValueError("No sequence for record %s" % record.id)
        seq_str = str(record.seq)
        qualities_str = _get_sanger_quality_str(record)
        if len(seq_str) != len(qualities_str):
            raise ValueError("Record %s has sequence length %i but %i quality scores" \
                             % (record.id, len(seq_str), len(qualities_str)))

        #FASTQ files can include a description, just like FASTA files
        #(at least, this is what the NCBI Short Read Archive does)
        id = self.clean(record.id)
        description = self.clean(record.description)
        if not description:
            title = id
        elif description.split(None, 1)[0] == id:
            #The description already starts with the id
            title = description
        else:
            title = "%s %s" % (id, description)
        self.handle.write("@%s\n%s\n+\n%s\n" % (title, seq_str, qualities_str))
class QualPhredWriter(SequentialSequenceWriter):
    """Class to write QUAL format files (using PHRED quality scores).

    Although you can use this class directly, you are strongly encouraged
    to use the Bio.SeqIO.write() function instead. For example, this code
    reads in a FASTQ file and saves the quality scores into a QUAL file:

    >>> from Bio import SeqIO
    >>> record_iterator = SeqIO.parse(open("Quality/example.fastq"), "fastq")
    >>> out_handle = open("Quality/temp.qual", "w")
    >>> SeqIO.write(record_iterator, out_handle, "qual")
    3
    >>> out_handle.close()

    This code is also called if you use the .format("qual") method of a
    SeqRecord.

    P.S. Don't forget to clean up the temp file if you don't need it anymore:

    >>> import os
    >>> os.remove("Quality/temp.qual")
    """
    def __init__(self, handle, wrap=60, record2title=None):
        """Create a QUAL writer.

        Arguments:
         - handle - Handle to an output file, e.g. as returned
                    by open(filename, "w")
         - wrap   - Optional line length used to wrap sequence lines.
                    Defaults to wrapping the sequence at 60 characters.
                    Use zero (or None) for no wrapping, giving a single
                    long line for the sequence.
         - record2title - Optional function to return the text to be
                    used for the title line of each record. By default
                    a combination of the record.id and record.description
                    is used. If the record.description starts with the
                    record.id, then just the record.description is used.

        The record2title argument is present for consistency with the
        Bio.SeqIO.FastaIO writer class.
        """
        SequentialSequenceWriter.__init__(self, handle)
        self.wrap = None
        if wrap:
            if wrap < 1:
                #Was a bare ValueError; give the caller a useful message.
                raise ValueError("wrap should be None, zero, or a positive integer")
            self.wrap = wrap
        self.record2title = record2title

    def write_record(self, record):
        """Write a single QUAL record to the file."""
        assert self._header_written
        assert not self._footer_written
        self._record_written = True

        handle = self.handle
        wrap = self.wrap

        if self.record2title:
            title = self.clean(self.record2title(record))
        else:
            id = self.clean(record.id)
            description = self.clean(record.description)
            if description and description.split(None, 1)[0]==id:
                #The description includes the id at the start
                title = description
            elif description:
                title = "%s %s" % (id, description)
            else:
                title = id
        handle.write(">%s\n" % title)

        qualities = _get_phred_quality(record)
        try:
            #This rounds to the nearest integer.
            #TODO - can we record a float in a qual file?
            qualities_strs = [("%i" % round(q, 0)) for q in qualities]
        except TypeError:
            #Use "as"-free form plus bare re-raise: works on Python 2.6+
            #and Python 3, and preserves the original traceback.
            if None in qualities:
                raise TypeError("A quality value of None was found")
            else:
                raise

        #Guard with "wrap and" so wrap=None never hits the int comparison
        #(None > 5 raises TypeError on Python 3).
        if wrap and wrap > 5:
            #Fast wrapping
            data = " ".join(qualities_strs)
            while True:
                if len(data) <= wrap:
                    self.handle.write(data + "\n")
                    break
                else:
                    #By construction there must be spaces in the first X chars
                    #(unless we have X digit or higher quality scores!)
                    i = data.rfind(" ", 0, wrap)
                    handle.write(data[:i] + "\n")
                    data = data[i+1:]
        elif wrap:
            #Safe wrapping for very small wrap values
            while qualities_strs:
                line = qualities_strs.pop(0)
                while qualities_strs \
                and len(line) + 1 + len(qualities_strs[0]) < wrap:
                    line += " " + qualities_strs.pop(0)
                handle.write(line + "\n")
        else:
            #No wrapping
            data = " ".join(qualities_strs)
            handle.write(data + "\n")
class FastqSolexaWriter(SequentialSequenceWriter):
    r"""Write old style Solexa/Illumina FASTQ format files (with Solexa qualities).

    This outputs FASTQ files like those from the early Solexa/Illumina
    pipeline, using Solexa scores and an ASCII offset of 64. These are
    NOT compatible with the standard Sanger style PHRED FASTQ files.

    If your records contain a "solexa_quality" entry under letter_annotations,
    this is used, otherwise any "phred_quality" entry will be used after
    conversion using the solexa_quality_from_phred function. If neither style
    of quality scores are present, an exception is raised.

    Although you can use this class directly, you are strongly encouraged
    to use the Bio.SeqIO.write() function instead. For example, this code
    reads in a FASTQ file and re-saves it as another FASTQ file:

    >>> from Bio import SeqIO
    >>> record_iterator = SeqIO.parse(open("Quality/solexa_example.fastq"), "fastq-solexa")
    >>> out_handle = open("Quality/temp.fastq", "w")
    >>> SeqIO.write(record_iterator, out_handle, "fastq-solexa")
    5
    >>> out_handle.close()

    You might want to do this if the original file included extra line breaks,
    which (while valid) may not be supported by all tools. The output file
    from Biopython will have each sequence on a single line, and each quality
    string on a single line (which is considered desirable for maximum
    compatibility).

    This code is also called if you use the .format("fastq-solexa") method of
    a SeqRecord. For example,

    >>> record = SeqIO.read(open("Quality/sanger_faked.fastq"), "fastq-sanger")
    >>> print record.format("fastq-solexa")
    @Test PHRED qualities from 40 to 0 inclusive
    ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTN
    +
    hgfedcba`_^]\[ZYXWVUTSRQPONMLKJHGFECB@>;;
    <BLANKLINE>

    Note that Solexa FASTQ files have an upper limit of Solexa quality 62, which is
    encoded as ASCII 126, the tilde. If your quality scores must be truncated to fit,
    a warning is issued.

    P.S. Don't forget to delete the temp file if you don't need it anymore:

    >>> import os
    >>> os.remove("Quality/temp.fastq")
    """
    def write_record(self, record):
        """Write a single FASTQ record to the file."""
        assert self._header_written
        assert not self._footer_written
        self._record_written = True

        #TODO - Is an empty sequence allowed in FASTQ format?
        if record.seq is None:
            raise ValueError("No sequence for record %s" % record.id)
        seq_str = str(record.seq)
        qualities_str = _get_solexa_quality_str(record)
        if len(seq_str) != len(qualities_str):
            raise ValueError("Record %s has sequence length %i but %i quality scores" \
                             % (record.id, len(seq_str), len(qualities_str)))

        #FASTQ files can include a description, just like FASTA files
        #(at least, this is what the NCBI Short Read Archive does)
        id = self.clean(record.id)
        description = self.clean(record.description)
        if not description:
            title = id
        elif description.split(None, 1)[0] == id:
            #The description already starts with the id
            title = description
        else:
            title = "%s %s" % (id, description)
        self.handle.write("@%s\n%s\n+\n%s\n" % (title, seq_str, qualities_str))
class FastqIlluminaWriter(SequentialSequenceWriter):
    r"""Write Illumina 1.3+ FASTQ format files (with PHRED quality scores).

    This outputs FASTQ files like those from the Solexa/Illumina 1.3+ pipeline,
    using PHRED scores and an ASCII offset of 64. Note these files are NOT
    compatible with the standard Sanger style PHRED FASTQ files which use an
    ASCII offset of 33 (see SANGER_SCORE_OFFSET, asserted to be ord("!")
    elsewhere in this module).

    Although you can use this class directly, you are strongly encouraged to
    use the Bio.SeqIO.write() function with format name "fastq-illumina"
    instead. This code is also called if you use the .format("fastq-illumina")
    method of a SeqRecord. For example,

    >>> from Bio import SeqIO
    >>> record = SeqIO.read(open("Quality/sanger_faked.fastq"), "fastq-sanger")
    >>> print record.format("fastq-illumina")
    @Test PHRED qualities from 40 to 0 inclusive
    ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTN
    +
    hgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@
    <BLANKLINE>

    Note that Illumina FASTQ files have an upper limit of PHRED quality 62, which is
    encoded as ASCII 126, the tilde. If your quality scores are truncated to fit, a
    warning is issued.
    """
    def write_record(self, record):
        """Write a single FASTQ record to the file."""
        assert self._header_written
        assert not self._footer_written
        self._record_written = True

        #TODO - Is an empty sequence allowed in FASTQ format?
        if record.seq is None:
            raise ValueError("No sequence for record %s" % record.id)
        seq_str = str(record.seq)
        qualities_str = _get_illumina_quality_str(record)
        if len(qualities_str) != len(seq_str):
            raise ValueError("Record %s has sequence length %i but %i quality scores" \
                             % (record.id, len(seq_str), len(qualities_str)))

        #FASTQ files can include a description, just like FASTA files
        #(at least, this is what the NCBI Short Read Archive does)
        id = self.clean(record.id)
        description = self.clean(record.description)
        if description and description.split(None, 1)[0]==id:
            #The description includes the id at the start
            title = description
        elif description:
            title = "%s %s" % (id, description)
        else:
            title = id
        self.handle.write("@%s\n%s\n+\n%s\n" % (title, seq_str, qualities_str))
def PairedFastaQualIterator(fasta_handle, qual_handle, alphabet = single_letter_alphabet, title2ids = None):
    """Iterate over matched FASTA and QUAL files as SeqRecord objects.

    For example, consider this short QUAL file with PHRED quality scores::

        >EAS54_6_R1_2_1_413_324
        26 26 18 26 26 26 26 26 26 26 26 26 26 26 26 22 26 26 26 26
        26 26 26 23 23
        >EAS54_6_R1_2_1_540_792
        26 26 26 26 26 26 26 26 26 26 26 22 26 26 26 26 26 12 26 26
        26 18 26 23 18
        >EAS54_6_R1_2_1_443_348
        26 26 26 26 26 26 26 26 26 26 26 24 26 22 26 26 13 22 26 18
        24 18 18 18 18

    And a matching FASTA file::

        >EAS54_6_R1_2_1_413_324
        CCCTTCTTGTCTTCAGCGTTTCTCC
        >EAS54_6_R1_2_1_540_792
        TTGGCAGGCCAAGGCCGATGGATCA
        >EAS54_6_R1_2_1_443_348
        GTTGCTTCTGGCGTGGGTGGGGGGG

    You can parse these separately using Bio.SeqIO with the "qual" and
    "fasta" formats, but then you'll get a group of SeqRecord objects with
    no sequence, and a matching group with the sequence but not the
    qualities. Because it only deals with one input file handle, Bio.SeqIO
    can't be used to read the two files together - but this function can!
    For example,

    >>> rec_iter = PairedFastaQualIterator(open("Quality/example.fasta", "rU"),
    ...                                    open("Quality/example.qual", "rU"))
    >>> for record in rec_iter:
    ...     print record.id, record.seq
    EAS54_6_R1_2_1_413_324 CCCTTCTTGTCTTCAGCGTTTCTCC
    EAS54_6_R1_2_1_540_792 TTGGCAGGCCAAGGCCGATGGATCA
    EAS54_6_R1_2_1_443_348 GTTGCTTCTGGCGTGGGTGGGGGGG

    As with the FASTQ or QUAL parsers, if you want to look at the qualities,
    they are in each record's per-letter-annotation dictionary as a simple
    list of integers:

    >>> print record.letter_annotations["phred_quality"]
    [26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 24, 26, 22, 26, 26, 13, 22, 26, 18, 24, 18, 18, 18, 18]

    If you have access to data as a FASTQ format file, using that directly
    would be simpler and more straight forward. Note that you can easily use
    this function to convert paired FASTA and QUAL files into FASTQ files:

    >>> from Bio import SeqIO
    >>> rec_iter = PairedFastaQualIterator(open("Quality/example.fasta", "rU"),
    ...                                    open("Quality/example.qual", "rU"))
    >>> out_handle = open("Quality/temp.fastq", "w")
    >>> SeqIO.write(rec_iter, out_handle, "fastq")
    3
    >>> out_handle.close()

    And don't forget to clean up the temp file if you don't need it anymore:

    >>> import os
    >>> os.remove("Quality/temp.fastq")
    """
    from Bio.SeqIO.FastaIO import FastaIterator
    fasta_iter = FastaIterator(fasta_handle, alphabet=alphabet,
                               title2ids=title2ids)
    qual_iter = QualPhredIterator(qual_handle, alphabet=alphabet,
                                  title2ids=title2ids)

    #Using zip(...) would create a list loading everything into memory!
    #It would also not catch any extra records found in only one file.
    while True:
        try:
            #Use the next() builtin (Python 2.6+ and Python 3) rather
            #than the Python 2 only .next() method.
            f_rec = next(fasta_iter)
        except StopIteration:
            f_rec = None
        try:
            q_rec = next(qual_iter)
        except StopIteration:
            q_rec = None
        if f_rec is None and q_rec is None:
            #End of both files
            break
        if f_rec is None:
            raise ValueError("FASTA file has more entries than the QUAL file.")
        if q_rec is None:
            raise ValueError("QUAL file has more entries than the FASTA file.")
        if f_rec.id != q_rec.id:
            raise ValueError("FASTA and QUAL entries do not match (%s vs %s)." \
                             % (f_rec.id, q_rec.id))
        if len(f_rec) != len(q_rec.letter_annotations["phred_quality"]):
            raise ValueError("Sequence length and number of quality scores disagree for %s" \
                             % f_rec.id)
        #Merge the data....
        f_rec.letter_annotations["phred_quality"] = q_rec.letter_annotations["phred_quality"]
        yield f_rec
    #Done
#Done
def _test():
    """Run the Bio.SeqIO module's doctests.

    This will try and locate the unit tests directory, and run the doctests
    from there in order that the relative paths used in the examples work.
    """
    import doctest
    import os
    if os.path.isdir(os.path.join("..", "..", "Tests")):
        print "Runing doctests..."
        #Remember where we are so we can change back afterwards
        cur_dir = os.path.abspath(os.curdir)
        os.chdir(os.path.join("..", "..", "Tests"))
        #Sanity check that the example files used in the docstrings exist
        assert os.path.isfile("Quality/example.fastq")
        assert os.path.isfile("Quality/example.fasta")
        assert os.path.isfile("Quality/example.qual")
        assert os.path.isfile("Quality/tricky.fastq")
        assert os.path.isfile("Quality/solexa_faked.fastq")
        doctest.testmod(verbose=0)
        #Restore the original working directory
        os.chdir(cur_dir)
        del cur_dir
        print "Done"
if __name__ == "__main__":
    #Run the self-tests (doctests) when executed as a script
    _test()
|
bryback/quickseq
|
genescript/Bio/SeqIO/QualityIO.py
|
Python
|
mit
| 76,159
|
[
"BioJava",
"BioPerl",
"Biopython"
] |
f75d1a9c0e488b47a2b90330f0c76f285a2c2688907685213db8a624611b172d
|
#!/usr/local/bin/python3.4
# ----Copyright (c) 2016 Carnegie Hall | The MIT License (MIT)----
# ----For the full license terms, please visit https://github.com/CarnegieHall/quality-control/blob/master/LICENSE----
# argument 1 is the XML report

import sys
import xml.etree.ElementTree as ET


def summarize(report):
    """Count pass/fail outcomes in a MediaConch XML report.

    ``report`` may be a file name or an open file object, as accepted by
    xml.etree.ElementTree.parse(). Returns a (pass_count, fail_count)
    tuple based on the 'outcome' attribute of each grandchild element of
    the document root; elements without an 'outcome' attribute are ignored.
    """
    root = ET.parse(report).getroot()
    cPass = 0
    cFail = 0
    for child in root:
        for grandchild in child:
            # .get() avoids a KeyError crash on elements with no 'outcome'
            outcome = grandchild.attrib.get('outcome')
            if outcome == 'pass':
                cPass += 1
            elif outcome == 'fail':
                cFail += 1
    return cPass, cFail


if __name__ == "__main__":
    cPass, cFail = summarize(sys.argv[1])
    print("Pass count: ", cPass, '\t', "Fail count: ", cFail)
|
CarnegieHall/quality-control
|
mediaconch/mediaconch-xmlreport-summary.py
|
Python
|
mit
| 598
|
[
"VisIt"
] |
7da44fbba3e32c4ac201b53a2f5253d64f7d63c009c85d08a4e8939348a8088d
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Johan Gronqvist (johan.gronqvist@gmail.com)
# copyright (C) 2007 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
# "People with less than 2 parents"
#-------------------------------------------------------------------------
class MissingParent(Rule):
    """People with less than two parents.

    Matches a person who is not a child in any family, or who is a child
    in at least one family that lacks a father or a mother.
    """
    name = _('People missing parents')
    description = _("Matches people that are children"
                    " in a family with less than two parents"
                    " or are not children in any family.")
    category = _('Family filters')

    def apply(self, db, person):
        """Return True if *person* matches this rule."""
        families = person.get_parent_family_handle_list()
        if not families:
            #Not a child in any family
            return True
        #Reuse the list fetched above rather than calling the
        #accessor a second time (the original re-queried it).
        for family_handle in families:
            family = db.get_family_from_handle(family_handle)
            if family:
                if not family.get_father_handle():
                    return True
                if not family.get_mother_handle():
                    return True
        return False
|
Nick-Hall/gramps
|
gramps/gen/filters/rules/person/_missingparent.py
|
Python
|
gpl-2.0
| 2,399
|
[
"Brian"
] |
4249e015d227dbc47e27a05efe446632c553f299f93ccd53202444e2512608c1
|
# Copyright 2013-2017, Brian May
#
# This file is part of python-alogger.
#
# python-alogger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-alogger is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-alogger If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, unicode_literals
import os.path
import unittest
from . import examples
from .base import Base
class TestSlurm(Base, unittest.TestCase):
    """Exercise the SLURM log parser via the shared Base test mix-in."""
    file_prefix = "slurm"
    log_type = "SLURM"

    def get_cfg(self):
        """Return parser configuration pointing at the bundled example logs."""
        example_dir = os.path.dirname(os.path.abspath(examples.__file__))
        return {
            'sacct_path': os.path.join(example_dir, self.file_prefix),
            'jobid_postfix': '-m',
        }
|
Karaage-Cluster/python-alogger
|
alogger/tests/test_slurm.py
|
Python
|
gpl-3.0
| 1,193
|
[
"Brian"
] |
554d423dffb37f8c31422ebbce68066d2ecf04f35400f7ae35e4f501b19a6125
|
#!/usr/bin/python
#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2012 Brian Langenberger
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
import audiotools
import struct
import random
import tempfile
import decimal
import os
import os.path
import test_streams
import cStringIO
from hashlib import md5
from test import (parser, Variable_Reader, BLANK_PCM_Reader,
RANDOM_PCM_Reader,
EXACT_BLANK_PCM_Reader, SHORT_PCM_COMBINATIONS,
MD5_Reader, FrameCounter,
MiniFrameReader, Combinations, Possibilities,
TEST_COVER1, TEST_COVER2, TEST_COVER3, HUGE_BMP)
def do_nothing(self):
    #Stand-in test method: decorators below substitute this for tests
    #that are switched off in the configuration file.
    pass
#add a bunch of decorator metafunctions like LIB_CORE
#which can be wrapped around individual tests as needed
#For every section/option pair in the test config, create a module-level
#decorator named SECTION_OPTION: when the option is enabled it returns the
#test function unchanged, otherwise it replaces it with do_nothing so the
#test is effectively skipped.
for section in parser.sections():
    for option in parser.options(section):
        if (parser.getboolean(section, option)):
            vars()["%s_%s" % (section.upper(),
                              option.upper())] = lambda function: function
        else:
            vars()["%s_%s" % (section.upper(),
                              option.upper())] = lambda function: do_nothing
class PCMReader(unittest.TestCase):
    """Tests for audiotools.PCMReader over in-memory raw PCM data."""

    @LIB_PCM
    def test_pcm(self):
        from audiotools.pcm import from_list

        #try reading lots of bps/signed/endianness combinations
        for bps in [8, 16, 24]:
            for big_endian in [True, False]:
                for signed in [True, False]:
                    #Ten samples (-5..4) packed as raw bytes in this
                    #particular bits-per-sample / endianness / signedness
                    reader = audiotools.PCMReader(
                        cStringIO.StringIO(
                            from_list(range(-5, 5),
                                      1,
                                      bps,
                                      True).to_bytes(big_endian, signed)),
                        sample_rate=44100,
                        channels=1,
                        channel_mask=0x4,
                        bits_per_sample=bps,
                        signed=signed,
                        big_endian=big_endian)

                    #stream attributes should echo the constructor arguments
                    self.assertEqual(reader.sample_rate, 44100)
                    self.assertEqual(reader.channels, 1)
                    self.assertEqual(reader.channel_mask, 0x4)
                    self.assertEqual(reader.bits_per_sample, bps)

                    #ensure the FrameList is read correctly
                    f = reader.read((bps / 8) * 10)
                    self.assertEqual(len(f), 10)
                    self.assertEqual(list(f), range(-5, 5))

                    #ensure subsequent reads return empty FrameLists
                    for i in xrange(10):
                        f = reader.read((bps / 8) * 10)
                        self.assertEqual(len(f), 0)

                    #ensure closing the stream raises ValueErrors
                    #on subsequent reads
                    reader.close()
                    self.assertRaises(ValueError, reader.read, (bps / 8) * 10)
class PCMCat(unittest.TestCase):
    """unit tests for audiotools.PCMCat stream concatenation"""

    @LIB_PCM
    def test_pcm(self):
        """checks init-time validation, reading and close propagation"""

        from audiotools.pcm import from_list

        #ensure mismatched streams raise ValueError at init time
        #(a single stream is always valid)
        audiotools.PCMCat([audiotools.PCMReader(cStringIO.StringIO(""),
                                                sample_rate=44100,
                                                channels=1,
                                                channel_mask=0x4,
                                                bits_per_sample=16)])

        #mismatched sample_rate
        self.assertRaises(ValueError,
                          audiotools.PCMCat,
                          [audiotools.PCMReader(cStringIO.StringIO(""),
                                                sample_rate=96000,
                                                channels=1,
                                                channel_mask=0x4,
                                                bits_per_sample=16),
                           audiotools.PCMReader(cStringIO.StringIO(""),
                                                sample_rate=44100,
                                                channels=1,
                                                channel_mask=0x4,
                                                bits_per_sample=16)])

        #mismatched channels/channel_mask
        self.assertRaises(ValueError,
                          audiotools.PCMCat,
                          [audiotools.PCMReader(cStringIO.StringIO(""),
                                                sample_rate=44100,
                                                channels=2,
                                                channel_mask=0x3,
                                                bits_per_sample=16),
                           audiotools.PCMReader(cStringIO.StringIO(""),
                                                sample_rate=44100,
                                                channels=1,
                                                channel_mask=0x4,
                                                bits_per_sample=16)])

        #mismatched bits_per_sample
        self.assertRaises(ValueError,
                          audiotools.PCMCat,
                          [audiotools.PCMReader(cStringIO.StringIO(""),
                                                sample_rate=44100,
                                                channels=1,
                                                channel_mask=0x4,
                                                bits_per_sample=24),
                           audiotools.PCMReader(cStringIO.StringIO(""),
                                                sample_rate=44100,
                                                channels=1,
                                                channel_mask=0x4,
                                                bits_per_sample=16)])

        #three matching sub-streams covering range(-15, 15)
        main_readers = [audiotools.PCMReader(
            cStringIO.StringIO(
                from_list(samples, 1, 16, True).to_bytes(True,
                                                         True)),
            sample_rate=44100,
            channels=1,
            channel_mask=0x4,
            bits_per_sample=16,
            signed=True,
            big_endian=True)
            for samples in [range(-15, -5),
                            range(-5, 5),
                            range(5, 15)]]

        reader = audiotools.PCMCat(main_readers)

        #ensure PCMCat's stream attributes match first reader's
        self.assertEqual(reader.sample_rate, 44100)
        self.assertEqual(reader.channels, 1)
        self.assertEqual(reader.channel_mask, 0x4)
        self.assertEqual(reader.bits_per_sample, 16)

        #ensure all the substreams are read correctly
        samples = []
        f = reader.read(2)
        while (len(f) > 0):
            samples.extend(list(f))
            f = reader.read(2)
        self.assertEqual(samples, range(-15, 15))

        #ensure subsequent reads return empty FrameLists
        for i in xrange(10):
            self.assertEqual(len(reader.read(2)), 0)

        #main readers should not yet be closed
        for r in main_readers:
            for i in xrange(10):
                self.assertEqual(len(r.read(2)), 0)

        #ensure closing the stream raises ValueErrors
        #on subsequent reads
        reader.close()

        self.assertRaises(ValueError, reader.read, 2)

        #sub readers should also be closed by PCMCat's close()
        for r in main_readers:
            self.assertRaises(ValueError, r.read, 2)
class BufferedPCMReader(unittest.TestCase):
    """unit tests for audiotools.BufferedPCMReader frame regularization"""

    @LIB_PCM
    def test_pcm(self):
        """checks fixed-size reads, attribute pass-through and closing"""

        def frame_lengths(reader, pcm_frames):
            #yields the size of each FrameList read,
            #then closes the reader once it's exhausted
            frame = reader.read(pcm_frames)
            while (len(frame) > 0):
                yield frame.frames
                frame = reader.read(pcm_frames)
            else:
                reader.close()

        #ensure our reader is generating randomly-sized frames
        reader = Variable_Reader(EXACT_BLANK_PCM_Reader(4096 * 100))
        self.assert_(len(set(frame_lengths(reader, 4096))) > 1)

        #then, ensure that wrapped our reader in a BufferedPCMReader
        #results in equal-sized frames
        reader = audiotools.BufferedPCMReader(
            Variable_Reader(EXACT_BLANK_PCM_Reader(4096 * 100)))
        #(make sure to account for bps/channels in frame_lengths())
        self.assertEqual(set(frame_lengths(reader, 4096)), set([4096]))

        #check that sample_rate, bits_per_sample, channel_mask and channels
        #pass-through properly
        for sample_rate in [32000, 44100, 48000, 192000]:
            for bits_per_sample in [8, 16, 24]:
                for (channels, channel_mask) in [(1, 0x4),
                                                 (2, 0x3),
                                                 (4, 0x33),
                                                 (6, 0x3F)]:
                    reader = BLANK_PCM_Reader(1,
                                              sample_rate=sample_rate,
                                              channels=channels,
                                              bits_per_sample=bits_per_sample,
                                              channel_mask=channel_mask)
                    reader2 = audiotools.BufferedPCMReader(reader)
                    self.assertEqual(reader.sample_rate, sample_rate)
                    self.assertEqual(reader.channels, channels)
                    self.assertEqual(reader.bits_per_sample, bits_per_sample)
                    self.assertEqual(reader.channel_mask, channel_mask)

                    self.assertEqual(reader2.sample_rate, sample_rate)
                    self.assertEqual(reader2.channels, channels)
                    self.assertEqual(reader2.bits_per_sample, bits_per_sample)
                    self.assertEqual(reader2.channel_mask, channel_mask)

        #ensure that random-sized reads also work okay
        total_frames = 4096 * 1000
        reader = audiotools.BufferedPCMReader(
            Variable_Reader(EXACT_BLANK_PCM_Reader(total_frames)))
        while (total_frames > 0):
            frames = min(total_frames, random.choice(range(1, 1000)))
            frame = reader.read(frames)
            self.assertEqual(frame.frames, frames)
            total_frames -= frame.frames

        #ensure reading after the stream has been exhausted
        #results in empty FrameLists
        reader = audiotools.BufferedPCMReader(
            EXACT_BLANK_PCM_Reader(44100))
        f = reader.read(4096)
        while (len(f) > 0):
            f = reader.read(4096)
        self.assertEqual(len(f), 0)
        for i in xrange(10):
            f = reader.read(4096)
            self.assertEqual(len(f), 0)

        #and ensure reading after the stream is closed
        #raises a ValueError
        reader.close()
        self.assertRaises(ValueError,
                          reader.read,
                          4096)
class LimitedPCMReader(unittest.TestCase):
    """unit tests for audiotools.LimitedPCMReader sub-stream windows"""

    @LIB_PCM
    def test_pcm(self):
        """checks consuming a main reader in fixed-length chunks"""

        from audiotools.pcm import from_list

        main_reader = audiotools.PCMReader(
            cStringIO.StringIO(
                from_list(range(-50, 50), 1, 16, True).to_bytes(True, True)),
            sample_rate=44100,
            channels=1,
            channel_mask=0x4,
            bits_per_sample=16,
            signed=True,
            big_endian=True)

        total_samples = []

        #the chunk sizes sum to the 100 samples of main_reader
        for pcm_frames in [10, 20, 30, 40]:
            reader_samples = []
            reader = audiotools.LimitedPCMReader(main_reader, pcm_frames)
            self.assertEqual(reader.sample_rate, 44100)
            self.assertEqual(reader.channels, 1)
            self.assertEqual(reader.channel_mask, 0x4)
            self.assertEqual(reader.bits_per_sample, 16)

            f = reader.read(2)
            while (len(f) > 0):
                reader_samples.extend(list(f))
                f = reader.read(2)

            self.assertEqual(len(reader_samples), pcm_frames)

            total_samples.extend(reader_samples)

            #ensure subsequent reads return empty FrameLists
            for i in xrange(10):
                self.assertEqual(len(reader.read(2)), 0)

            #ensure closing the substream raises ValueErrors
            #on subsequent reads
            #(note that this doesn't close the main reader)
            reader.close()
            self.assertRaises(ValueError, reader.read, 2)

        self.assertEqual(total_samples, range(-50, 50))

        #ensure subsequent reads of main reader return empty FrameLists
        for i in xrange(10):
            self.assertEqual(len(main_reader.read(2)), 0)

        #ensure closing the substream raises ValueErrors
        #on subsequent reads
        main_reader.close()
        self.assertRaises(ValueError, main_reader.read, 2)
class PCMReaderWindow(unittest.TestCase):
    """unit tests for audiotools.PCMReaderWindow offset/length windowing"""

    @LIB_PCM
    def test_pcm(self):
        """checks windows across negative/positive offsets and lengths"""

        from audiotools.pcm import from_list

        for initial_offset in range(-5, 5):
            for pcm_frames in range(5, 15):
                main_reader = audiotools.PCMReader(
                    cStringIO.StringIO(
                        from_list(range(1, 11),
                                  1,
                                  16,
                                  True).to_bytes(True, True)),
                    sample_rate=44100,
                    channels=1,
                    channel_mask=0x4,
                    bits_per_sample=16,
                    signed=True,
                    big_endian=True)

                reader = audiotools.PCMReaderWindow(main_reader,
                                                    initial_offset,
                                                    pcm_frames)

                #stream attributes match the wrapped reader's
                self.assertEqual(reader.sample_rate,
                                 main_reader.sample_rate)
                self.assertEqual(reader.channels,
                                 main_reader.channels)
                self.assertEqual(reader.channel_mask,
                                 main_reader.channel_mask)
                self.assertEqual(reader.bits_per_sample,
                                 main_reader.bits_per_sample)

                #ensure reads generate the proper window of samples
                samples = []
                f = reader.read(2)
                while (len(f) > 0):
                    samples.extend(list(f))
                    f = reader.read(2)

                self.assertEqual(len(samples), pcm_frames)

                #build the expected window from the base samples
                target_samples = range(1, 11)
                if (initial_offset < 0):
                    #negative offsets pad window with 0s
                    target_samples = (([0] * abs(initial_offset)) +
                                      target_samples)
                elif (initial_offset > 0):
                    #positive offsets remove samples from window
                    target_samples = target_samples[initial_offset:]

                if (len(target_samples) < pcm_frames):
                    #window longer than samples gets padded with 0s
                    target_samples += [0] * (pcm_frames - len(target_samples))
                elif (len(target_samples) > pcm_frames):
                    #window shorder than samples truncates samples
                    target_samples = target_samples[0:pcm_frames]

                self.assertEqual(samples, target_samples)

                #ensure subsequent reads return empty FrameLists
                for i in xrange(10):
                    self.assertEqual(len(reader.read(2)), 0)

                #ensure closing the PCMReaderWindow
                #generates ValueErrors on subsequent reads
                reader.close()
                self.assertRaises(ValueError, reader.read, 2)

                #ensure closing the PCMReaderWindow
                #closes the main PCMReader also
                self.assertRaises(ValueError, main_reader.read, 2)
class Sines(unittest.TestCase):
    """unit tests for the test_streams signal generators"""

    @LIB_PCM
    def test_pcm(self):
        """checks exhaustion and closing behavior of each generator"""

        for stream in [
            test_streams.Generate01(44100),
            test_streams.Generate02(44100),
            test_streams.Generate03(44100),
            test_streams.Generate04(44100),
            test_streams.Sine8_Mono(200000, 48000,
                                    441.0, 0.50, 441.0, 0.49),
            test_streams.Sine8_Stereo(200000, 48000,
                                      441.0, 0.50, 441.0, 0.49, 1.0),
            test_streams.Sine16_Mono(200000, 48000,
                                     441.0, 0.50, 441.0, 0.49),
            test_streams.Sine16_Stereo(200000, 48000,
                                       441.0, 0.50, 441.0, 0.49, 1.0),
            test_streams.Sine24_Mono(200000, 48000,
                                     441.0, 0.50, 441.0, 0.49),
            test_streams.Sine24_Stereo(200000, 48000,
                                       441.0, 0.50, 441.0, 0.49, 1.0),
            test_streams.Simple_Sine(200000, 44100, 0x3F, 16,
                                     (6400, 10000),
                                     (11520, 15000),
                                     (16640, 20000),
                                     (21760, 25000),
                                     (26880, 30000),
                                     (30720, 35000)),
            test_streams.fsd16([1, -1], 100),
            test_streams.WastedBPS16(1000)]:

            #read the base data from the stream
            f = stream.read(4096)
            while (len(f) > 0):
                f = stream.read(4096)

            #ensure subsequent reads return empty FrameLists
            for i in xrange(10):
                self.assertEqual(len(stream.read(4096)), 0)

            #ensure subsequent reads on a closed stream
            #raises ValueError
            stream.close()
            self.assertRaises(ValueError, stream.read, 4096)
class CDDA(unittest.TestCase):
    """unit tests for CD audio extraction from a BIN/CUE disc image"""

    @LIB_CORE
    def setUp(self):
        """builds a sine-wave BIN file and matching CUE sheet on disk"""

        self.temp_dir = tempfile.mkdtemp()
        self.bin = os.path.join(self.temp_dir, "Test.BIN")
        self.cue = os.path.join(self.temp_dir, "Test.CUE")

        bin_file = open(self.bin, "wb")
        self.reader = test_streams.Sine16_Stereo(69470436, 44100,
                                                 441.0, 0.50,
                                                 4410.0, 0.49, 1.0)
        audiotools.transfer_framelist_data(self.reader, bin_file.write)
        bin_file.close()

        f = open(self.cue, "w")
        #the CUE sheet is stored base64-encoded and zlib-compressed
        f.write("""eJydkF1LwzAUQN8L/Q+X/oBxk6YfyVtoM4mu68iy6WudQ8qkHbNu+u9NneCc1IdCnk649xyuUQXk
epnpHGiOMU2Q+Z5xMCuLQs0tBOq92nTy7alus3b/AUeccL5/ZIHvZdLKWXkDjKcpIg2RszjxvYUy
09IUykCwanZNe2pAHrr6tXMjVtuZ+uG27l62Dk91T03VPG8np+oYwL1cK98DsEZmd4AE5CrXZU8c
O++wh2qzQxKc4X/S/l8vTQa3i7V2kWEap/iN57l66Pcjiq93IaWDUjpOyn9LETAVyASh1y0OR4Il
Fy3hYEs4qiXB6wOQULBQkOhCygalbISUUvrnACQVERfIr1scI4K5lk9od5+/""".decode('base64').decode('zlib'))
        f.close()

        #stash the configured read offset so tests can change it
        self.sample_offset = audiotools.config.get_default("System",
                                                           "cdrom_read_offset",
                                                           "0")

    @LIB_CORE
    def tearDown(self):
        """removes the temporary image and restores the read offset"""

        for f in os.listdir(self.temp_dir):
            os.unlink(os.path.join(self.temp_dir, f))
        os.rmdir(self.temp_dir)

        audiotools.config.set_default("System",
                                      "cdrom_read_offset",
                                      self.sample_offset)

    @LIB_CORE
    def test_init(self):
        """checks constructor argument validation"""

        from audiotools.cdio import CDDA
        from audiotools.cdio import CDImage

        self.assertRaises(TypeError, CDDA)
        self.assertRaises(TypeError, CDDA, None)
        self.assertRaises(TypeError, CDImage)
        self.assertRaises(ValueError, CDImage, "", -1)

    @LIB_CORE
    def test_cdda(self):
        """checks that the whole disc's PCM data matches the source"""

        cdda = audiotools.CDDA(self.cue)
        self.assertEqual(len(cdda), 4)
        checksum = md5()
        audiotools.transfer_framelist_data(audiotools.PCMCat(iter(cdda)),
                                           checksum.update)
        self.assertEqual(self.reader.hexdigest(),
                         checksum.hexdigest())

    @LIB_CORE
    def test_cdda_pcm(self):
        """checks per-track reading, exhaustion and closing"""

        cdda = audiotools.CDDA(self.cue)
        for track in cdda:
            #ensure all track data reads correctly
            track_frames = track.length() * 588
            total_frames = 0
            f = track.read(4096)
            while (len(f) > 0):
                total_frames += f.frames
                f = track.read(4096)
            self.assertEqual(total_frames, track_frames)

            #ensure further reads return empty FrameLists
            for i in xrange(10):
                self.assertEqual(len(track.read(4096)), 0)

            #ensure closing the reader raises ValueErrors
            #on subsequent reads
            track.close()
            self.assertRaises(ValueError, track.read, 4096)

    @LIB_CORE
    def test_cdda_positive_offset(self):
        #offset values don't apply to CD images
        #so this test doesn't do much
        audiotools.config.set_default("System",
                                      "cdrom_read_offset",
                                      str(10))
        cdda = audiotools.CDDA(self.cue)
        reader_checksum = md5()
        cdrom_checksum = md5()
        audiotools.transfer_framelist_data(
            audiotools.PCMCat(iter(cdda)),
            cdrom_checksum.update)
        self.reader.reset()
        audiotools.transfer_framelist_data(
            audiotools.PCMReaderWindow(self.reader,
                                       0,
                                       69470436),
            reader_checksum.update)
        self.assertEqual(reader_checksum.hexdigest(),
                         cdrom_checksum.hexdigest())

    @LIB_CORE
    def test_cdda_negative_offset(self):
        #offset values don't apply to CD images
        #so this test doesn't do much
        audiotools.config.set_default("System",
                                      "cdrom_read_offset",
                                      str(-10))
        cdda = audiotools.CDDA(self.cue)
        reader_checksum = md5()
        cdrom_checksum = md5()
        audiotools.transfer_framelist_data(
            audiotools.PCMCat(iter(cdda)),
            cdrom_checksum.update)
        self.reader.reset()
        audiotools.transfer_framelist_data(
            audiotools.PCMReaderWindow(self.reader,
                                       0,
                                       69470436),
            reader_checksum.update)
        self.assertEqual(reader_checksum.hexdigest(),
                         cdrom_checksum.hexdigest())
class ChannelMask(unittest.TestCase):
    """unit tests for the audiotools.ChannelMask container"""

    @LIB_CORE
    def test_mask(self):
        """checks empty masks plus every combination of speaker fields"""

        #an empty mask is undefined and round-trips through int()
        mask = audiotools.ChannelMask.from_fields()
        self.assert_(not mask.defined())
        self.assert_(mask.undefined())
        self.assertEqual(len(mask), 0)
        self.assertEqual(set([]), set(mask.channels()))
        mask2 = audiotools.ChannelMask(int(mask))
        self.assertEqual(mask, mask2)

        mask_fields = audiotools.ChannelMask.SPEAKER_TO_MASK.keys()
        for count in xrange(1, len(mask_fields) + 1):
            for fields in Combinations(mask_fields, count):
                #build a mask from fields
                mask = audiotools.ChannelMask.from_fields(
                    **dict([(field, True) for field in fields]))
                self.assert_(mask.defined())
                self.assert_(not mask.undefined())
                self.assertEqual(len(mask), len(fields))
                self.assertEqual(set(fields), set(mask.channels()))
                mask2 = audiotools.ChannelMask(int(mask))
                self.assertEqual(mask, mask2)
class Filename(unittest.TestCase):
    """unit tests for audiotools.Filename equality and hashing"""

    def setUp(self):
        """creates a file and a hard link to it in a temp directory"""

        self.temp_dir = tempfile.mkdtemp()
        self.temp_file1 = os.path.join(self.temp_dir, "file1")
        self.temp_file2 = os.path.join(self.temp_dir, "file2")
        f = open(self.temp_file1, "w")
        f.write("hello world")
        f.close()
        #file2 is a hard link to file1, so they share an inode
        os.link(self.temp_file1, self.temp_file2)

    def tearDown(self):
        os.unlink(self.temp_file1)
        os.unlink(self.temp_file2)
        os.rmdir(self.temp_dir)

    @LIB_CORE
    def test_filename(self):
        """checks identity semantics for on-disk and absent files"""

        file1 = audiotools.Filename(self.temp_file1)
        file2 = audiotools.Filename(self.temp_file2)
        file3 = audiotools.Filename(os.path.join(self.temp_dir, "file3"))
        file4 = audiotools.Filename(os.path.join(self.temp_dir, "./file3"))
        file5 = audiotools.Filename(os.path.join(self.temp_dir, "file4"))

        #hard-linked files compare equal despite different names
        self.assert_(file1.disk_file())
        self.assert_(file2.disk_file())
        self.assertNotEqual(str(file1), str(file2))
        self.assertNotEqual(unicode(file1), unicode(file2))
        self.assertEqual(file1, file2)
        self.assertEqual(hash(file1), hash(file2))

        #a non-existent file never equals an existing one
        self.assert_(not file3.disk_file())
        self.assertNotEqual(str(file1), str(file3))
        self.assertNotEqual(unicode(file1), unicode(file3))
        self.assertNotEqual(file1, file3)
        self.assertNotEqual(hash(file1), hash(file3))

        #non-existent files compare by normalized path
        self.assert_(not file4.disk_file())
        self.assertEqual(str(file3), str(file4))
        self.assertEqual(unicode(file3), unicode(file4))
        self.assertEqual(file3, file4)
        self.assertEqual(hash(file3), hash(file4))

        self.assert_(not file5.disk_file())
        self.assertNotEqual(str(file3), str(file5))
        self.assertNotEqual(unicode(file3), unicode(file5))
        self.assertNotEqual(file3, file5)
        self.assertNotEqual(hash(file3), hash(file5))
class ImageJPEG(unittest.TestCase):
    """unit tests for audiotools.Image metadata detection (JPEG)

    subclasses override setUp() to supply other image formats"""

    @LIB_CORE
    def setUp(self):
        #a small base64-encoded JPEG fixture
        self.image = """/9j/4AAQSkZJRgABAQEASABIAAD/2wBDAAIBAQEBAQIBAQECAgICAgQDAgICAgUEBAMEBgUGBgYF
BgYGBwkIBgcJBwYGCAsICQoKCgoKBggLDAsKDAkKCgr/2wBDAQICAgICAgUDAwUKBwYHCgoKCgoK
CgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgr/wAARCAAVAAwDAREA
AhEBAxEB/8QAGAAAAgMAAAAAAAAAAAAAAAAAAAgGBwn/xAAfEAACAgMAAwEBAAAAAAAAAAACAwQG
AQUHCBITABn/xAAUAQEAAAAAAAAAAAAAAAAAAAAA/8QAFBEBAAAAAAAAAAAAAAAAAAAAAP/aAAwD
AQACEQMRAD8A1/qnmzp6JO6PSvLudoqjZKDsZE6HB1TZEllhrLpABrNnCiYApEhrTcuAUZAuPM8M
pXgsuQJhaPDbB1q18n0tn7pQIdUtOxjFJ2lZhbIZmNV7sIlRWPDOVtetWVg0lESvqLPmZh6mQLNd
eO/02mVjy4qMeLpYXONsnb+Pe131ehvCws+2vm53hPE2SB1c1aMw1RvVJemSn5Brh1jIQNJyq32q
90ODZrvzPZU/bOJy9hXdrLjyGxWKcas5FsZhrao/T6LPGcESmBkwWeSWISH8B+D/2Q==""".decode('base64')
        self.md5sum = "f8c43ff52c53aff1625979de47a04cec"
        self.width = 12
        self.height = 21
        self.bpp = 24
        self.colors = 0
        self.mime_type = "image/jpeg"

    @LIB_CORE
    def tearDown(self):
        pass

    @LIB_CORE
    def test_checksum(self):
        """verifies the fixture itself hasn't been corrupted"""

        self.assertEqual(md5(self.image).hexdigest(), self.md5sum)

    @LIB_CORE
    def test_image(self):
        """checks Image.new() extracts the expected metadata"""

        img = audiotools.Image.new(self.image, u"Description", 1)
        self.assertEqual(img.data, self.image)
        self.assertEqual(img.mime_type, self.mime_type)
        self.assertEqual(img.width, self.width)
        self.assertEqual(img.height, self.height)
        self.assertEqual(img.color_depth, self.bpp)
        self.assertEqual(img.color_count, self.colors)
        self.assertEqual(img.description, u"Description")
        self.assertEqual(img.type, 1)
class ImagePNG(ImageJPEG):
    """runs the ImageJPEG test suite against a PNG fixture"""

    @LIB_CORE
    def setUp(self):
        self.image = """iVBORw0KGgoAAAANSUhEUgAAAAwAAAAVCAIAAAD9zpjjAAAAAXNSR0IArs4c6QAAAAlwSFlzAAAL
EwAACxMBAJqcGAAAAAd0SU1FB9kGBQA7LTgWUZgAAAAIdEVYdENvbW1lbnQA9syWvwAAANFJREFU
KM+9UrERgzAMfCUddy4pvIZZQPTsQOkBGAAxBgMwBBUTqGMHZqBSCuc4cO6SFLmokuT3698ymRk+
xQ1fxHegdV3btn092LZtHMdnse97WZYxRrtG13VN06QcZqaqIYQMBODIKdXDMADo+z7RE9HF9QFn
ZmY2sxCCqp5ZLzeIiJkBLMtycZFJKYpimqasmTOZWS7o/JhVVakqABFJPvJxInLmF5FzB2YWY3TO
ZTpExHuf8jsROefmec7Wwsx1XXvvAVCa+H7B9Of/9DPQAzSV43jVGYrtAAAAAElFTkSuQmCC""".decode('base64')
        self.md5sum = "31c4c5224327d5869aa6059bcda84d2e"
        self.width = 12
        self.height = 21
        self.bpp = 24
        self.colors = 0
        self.mime_type = "image/png"
class ImageCover1(ImageJPEG):
    """runs the ImageJPEG test suite against the TEST_COVER1 JPEG"""

    @LIB_CORE
    def setUp(self):
        self.image = TEST_COVER1
        self.md5sum = "dbb6a01eca6336381754346de71e052e"
        self.width = 500
        self.height = 500
        self.bpp = 24
        self.colors = 0
        self.mime_type = "image/jpeg"
class ImageCover2(ImageJPEG):
    """runs the ImageJPEG test suite against the TEST_COVER2 PNG"""

    @LIB_CORE
    def setUp(self):
        self.image = TEST_COVER2
        self.md5sum = "2d348cf729c840893d672dd69476955c"
        self.width = 500
        self.height = 500
        self.bpp = 24
        self.colors = 0
        self.mime_type = "image/png"
class ImageCover3(ImageJPEG):
    """runs the ImageJPEG test suite against the TEST_COVER3 JPEG"""

    @LIB_CORE
    def setUp(self):
        self.image = TEST_COVER3
        self.md5sum = "534b107e88d3830eac7ce814fc5d0279"
        self.width = 100
        self.height = 100
        self.bpp = 24
        self.colors = 0
        self.mime_type = "image/jpeg"
class ImageGIF(ImageJPEG):
    """runs the ImageJPEG test suite against a palette-based GIF"""

    @LIB_CORE
    def setUp(self):
        self.image = """R0lGODdhDAAVAIQSAAAAAAoKCg0NDRUVFRkZGTIyMkBAQExMTF5eXmdnZ3Nzc4CAgJiYmKWlpc3N
zdPT0+bm5vn5+f///////////////////////////////////////////////////////ywAAAAA
DAAVAAAFPKAkjmRpnuiDmBAjRkNSKsfoFCVQLsuomwaDpOBAAYIoUaCR1P1MRAnP1BtNRwnBjiC6
loqSZ3JMLpvNIQA7""".decode('base64')
        self.md5sum = "1d4d36801b53c41d01086cbf9d0cb471"
        self.width = 12
        self.height = 21
        self.bpp = 8
        self.colors = 32
        self.mime_type = "image/gif"
class ImageBMP(ImageJPEG):
    """runs the ImageJPEG test suite against a Windows BMP fixture"""

    @LIB_CORE
    def setUp(self):
        self.image = """Qk0qAwAAAAAAADYAAAAoAAAADAAAABUAAAABABgAAAAAAPQCAAATCwAAEwsAAAAAAAAAAAAA////
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
////////AAAA////////////////////////////////////////////gICAgICA////////////
////////////////zc3N////////////Z2dnDQ0N////////////////////gICAGRkZ////////
////////gICA////////////////gICAgICA////////////////////////MjIyzc3N////gICA
gICA////////////////////////////////AAAA////AAAA////////////////////////////
////////////CgoKpaWl////////////////////////////////////AAAAQEBAQEBA////////
////////////////////////QEBAQEBA////MjIyzc3N////////////////////////gICAgICA
////////////AAAA////////////////////zc3NMjIy////////////////////AAAA////////
////+fn5FRUVZ2dn////////////////////c3NzTExM////////09PTXl5e////////////////
////////5ubmmJiY////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////""".decode('base64')
        self.md5sum = "cb6ef2f7a458ab1d315c329f72ec9898"
        self.width = 12
        self.height = 21
        self.bpp = 24
        self.colors = 0
        self.mime_type = "image/x-ms-bmp"
class ImageTIFF(ImageJPEG):
    """runs the ImageJPEG test suite against a TIFF fixture"""

    @LIB_CORE
    def setUp(self):
        self.image = """SUkqAPwCAAD/////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
///T09NeXl7////////////////////////m5uaYmJj////////5+fkVFRVnZ2f/////////////
//////9zc3NMTEz////////////Nzc0yMjL///////////////////8AAAD/////////////////
//+AgICAgID///////////8AAAD///////////////////////////9AQEBAQED///8yMjLNzc3/
//////////////////////////////8AAABAQEBAQED/////////////////////////////////
//////8KCgqlpaX///////////////////////////////////8AAAD///8AAAD/////////////
//////////////////8yMjLNzc3///+AgICAgID///////////////////////+AgID/////////
//////+AgICAgID///////////////9nZ2cNDQ3///////////////////+AgIAZGRn///////+A
gICAgID////////////////////////////Nzc3///////8AAAD/////////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
//////////////////////////////8QAP4ABAABAAAAAAAAAAABAwABAAAADAAAAAEBAwABAAAA
FQAAAAIBAwADAAAAwgMAAAMBAwABAAAAAQAAAAYBAwABAAAAAgAAAA0BAgAzAAAAyAMAABEBBAAB
AAAACAAAABIBAwABAAAAAQAAABUBAwABAAAAAwAAABYBAwABAAAAQAAAABcBBAABAAAA9AIAABoB
BQABAAAA/AMAABsBBQABAAAABAQAABwBAwABAAAAAQAAACgBAwABAAAAAgAAAAAAAAAIAAgACAAv
aG9tZS9icmlhbi9EZXZlbG9wbWVudC9hdWRpb3Rvb2xzL3Rlc3QvaW1hZ2UudGlmZgAAAAAASAAA
AAEAAABIAAAAAQ==""".decode('base64')
        self.md5sum = "192ceb086d217421a5f151cc0afa3f05"
        self.width = 12
        self.height = 21
        self.bpp = 24
        self.colors = 0
        self.mime_type = "image/tiff"
class ImageHugeBMP(ImageJPEG):
    """runs the ImageJPEG test suite against a very large BMP

    exercises metadata parsing on multi-megabyte image data"""

    @LIB_CORE
    def setUp(self):
        #the fixture is stored bzip2-compressed to keep the module small
        self.image = HUGE_BMP.decode('bz2')
        self.md5sum = "558d875195829de829059fd4952fed46"
        self.width = 2366
        self.height = 2366
        self.bpp = 24
        self.colors = 0
        self.mime_type = "image/x-ms-bmp"
class PCMConverter(unittest.TestCase):
    """unit tests for audiotools.PCMConverter stream conversion"""

    @LIB_PCM
    def setUp(self):
        self.tempwav = tempfile.NamedTemporaryFile(suffix=".wav")

    @LIB_PCM
    def tearDown(self):
        self.tempwav.close()

    @LIB_PCM
    def test_conversions(self):
        """converts between all short PCM parameter combinations via WAVE"""

        for ((i_sample_rate,
              i_channels,
              i_channel_mask,
              i_bits_per_sample),
             (o_sample_rate,
              o_channels,
              o_channel_mask,
              o_bits_per_sample)) in Combinations(SHORT_PCM_COMBINATIONS, 2):

            reader = BLANK_PCM_Reader(5,
                                      sample_rate=i_sample_rate,
                                      channels=i_channels,
                                      bits_per_sample=i_bits_per_sample,
                                      channel_mask=i_channel_mask)

            converter = audiotools.PCMConverter(
                reader,
                sample_rate=o_sample_rate,
                channels=o_channels,
                bits_per_sample=o_bits_per_sample,
                channel_mask=o_channel_mask)

            wave = audiotools.WaveAudio.from_pcm(self.tempwav.name, converter)
            converter.close()

            #the encoded WAVE carries the output parameters
            self.assertEqual(wave.sample_rate(), o_sample_rate)
            self.assertEqual(wave.channels(), o_channels)
            self.assertEqual(wave.bits_per_sample(), o_bits_per_sample)
            self.assertEqual(wave.channel_mask(), o_channel_mask)

            #and the length (rounded to CD frames) is preserved
            self.assertEqual(
                (decimal.Decimal(wave.cd_frames()) / 75).to_integral(),
                5)

    @LIB_PCM
    def test_pcm(self):
        """checks exhaustion and close propagation of converted streams"""

        for (in_sample_rate,
             (in_channels,
              in_channel_mask),
             in_bits_per_sample) in Possibilities([44100, 96000],
                                                  [(1, 0x4),
                                                   (2, 0x3),
                                                   (4, 0x33)],
                                                  [16, 24]):
            for (out_sample_rate,
                 (out_channels,
                  out_channel_mask),
                 out_bits_per_sample) in Possibilities([44100, 96000],
                                                       [(1, 0x4),
                                                        (2, 0x3),
                                                        (4, 0x33)],
                                                       [16, 24]):
                main_reader = BLANK_PCM_Reader(
                    length=1,
                    sample_rate=in_sample_rate,
                    channels=in_channels,
                    bits_per_sample=in_bits_per_sample,
                    channel_mask=in_channel_mask)

                reader = audiotools.PCMConverter(
                    pcmreader=main_reader,
                    sample_rate=out_sample_rate,
                    channels=out_channels,
                    channel_mask=out_channel_mask,
                    bits_per_sample=out_bits_per_sample)

                #read contents of converted stream
                f = reader.read(4096)
                while (len(f) > 0):
                    f = reader.read(4096)

                #ensure subsequent reads return empty FrameLists
                for i in xrange(10):
                    self.assertEqual(len(reader.read(4096)), 0)

                #ensure closing stream raises ValueErrors
                #on subsequent reads
                reader.close()
                self.assertRaises(ValueError, reader.read, 4096)

                #ensure main reader is also closed
                #when converter is closed
                self.assertRaises(ValueError, main_reader.read, 4096)
class Test_ReplayGain(unittest.TestCase):
    """unit tests for the audiotools.ReplayGain value container"""

    @LIB_CORE
    def test_replaygain(self):
        #a trivial test of the ReplayGain container
        #(equality depends on all four fields:
        # track_gain, track_peak, album_gain, album_peak)
        self.assertEqual(audiotools.ReplayGain(0.5, 1.0, 0.5, 1.0),
                         audiotools.ReplayGain(0.5, 1.0, 0.5, 1.0))
        self.assertNotEqual(audiotools.ReplayGain(0.5, 1.0, 0.5, 1.0),
                            audiotools.ReplayGain(0.25, 1.0, 0.5, 1.0))
        self.assertNotEqual(audiotools.ReplayGain(0.5, 1.0, 0.5, 1.0),
                            audiotools.ReplayGain(0.5, 0.5, 0.5, 1.0))
        self.assertNotEqual(audiotools.ReplayGain(0.5, 1.0, 0.5, 1.0),
                            audiotools.ReplayGain(0.5, 1.0, 0.25, 1.0))
        self.assertNotEqual(audiotools.ReplayGain(0.5, 1.0, 0.5, 1.0),
                            audiotools.ReplayGain(0.5, 1.0, 0.5, 0.5))
class Test_filename_to_type(unittest.TestCase):
    """unit tests for audiotools.filename_to_type suffix lookup"""

    @LIB_CORE
    def test_filename_to_type(self):
        """checks known, ambiguous, unknown and missing suffixes"""

        #group the available types by suffix
        #since a suffix may map to more than one type
        type_group = {}
        for audio_type in audiotools.AVAILABLE_TYPES:
            type_group.setdefault(audio_type.SUFFIX, []).append(audio_type)

        for suffix in type_group.keys():
            temp = tempfile.NamedTemporaryFile(suffix="." + suffix)
            try:
                if (len(type_group[suffix]) == 1):
                    #unambiguous suffixes map directly to their type
                    self.assertEqual(audiotools.filename_to_type(temp.name),
                                     type_group[suffix][0])
                else:
                    #shared suffixes raise AmbiguousAudioType
                    self.assertRaises(audiotools.AmbiguousAudioType,
                                      audiotools.filename_to_type,
                                      temp.name)
            finally:
                temp.close()

        #an unrecognized suffix raises UnknownAudioType
        temp = tempfile.NamedTemporaryFile(suffix=".foo")
        try:
            self.assertRaises(audiotools.UnknownAudioType,
                              audiotools.filename_to_type,
                              temp.name)
        finally:
            temp.close()

        #no suffix at all also raises UnknownAudioType
        temp = tempfile.NamedTemporaryFile()
        try:
            self.assertRaises(audiotools.UnknownAudioType,
                              audiotools.filename_to_type,
                              temp.name)
        finally:
            temp.close()
class Test_timestamp(unittest.TestCase):
    """unit tests for timestamp building/parsing round-trips"""

    @LIB_CORE
    def test_timestamp(self):
        #parse_timestamp() must invert build_timestamp()
        #for a large range of values
        for timestamp in xrange(100000):
            self.assertEqual(
                audiotools.parse_timestamp(
                    audiotools.build_timestamp(timestamp)),
                timestamp)
class Test_group_tracks(unittest.TestCase):
    """unit tests for audiotools.group_tracks album grouping"""

    @LIB_CORE
    def setUp(self):
        """builds five FLAC tracks across three albums"""

        self.output_format = audiotools.FlacAudio
        self.track_files = [
            tempfile.NamedTemporaryFile(
                suffix="." + self.output_format.SUFFIX)
            for i in xrange(5)]
        self.tracks = [
            self.output_format.from_pcm(
                track.name,
                BLANK_PCM_Reader(1)) for track in self.track_files]
        #tracks 0 and 2 share an album; the rest are distinct
        self.tracks[0].set_metadata(audiotools.MetaData(
            album_name=u"Album 1",
            album_number=1,
            track_number=1))
        self.tracks[1].set_metadata(audiotools.MetaData(
            album_name=u"Album 2",
            album_number=1,
            track_number=1))
        self.tracks[2].set_metadata(audiotools.MetaData(
            album_name=u"Album 1",
            album_number=1,
            track_number=2))
        self.tracks[3].set_metadata(audiotools.MetaData(
            album_name=u"Album 2",
            album_number=2,
            track_number=1))
        self.tracks[4].set_metadata(audiotools.MetaData(
            album_name=u"Album 3",
            album_number=1,
            track_number=1))

    @LIB_CORE
    def tearDown(self):
        for track in self.track_files:
            track.close()

    @LIB_CORE
    def test_grouping(self):
        """checks tracks are grouped by album as expected"""

        groupings = list(audiotools.group_tracks(self.tracks))
        #sort by album name for a deterministic comparison
        groupings.sort(lambda x, y: cmp(x[0].get_metadata().album_name,
                                        y[0].get_metadata().album_name))
        self.assertEqual(groupings[0], [self.tracks[0], self.tracks[2]])
        self.assertEqual(groupings[1], [self.tracks[1]])
        self.assertEqual(groupings[2], [self.tracks[3]])
        self.assertEqual(groupings[3], [self.tracks[4]])
class Test_open(unittest.TestCase):
    """unit tests for the audiotools.open error paths"""

    @LIB_CORE
    def setUp(self):
        """builds dummy, garbage and corrupted-FLAC temp files"""

        self.dummy1 = tempfile.NamedTemporaryFile()
        self.dummy2 = tempfile.NamedTemporaryFile()
        self.dummy3 = tempfile.NamedTemporaryFile()
        self.dummy1.write("12345" * 1000)
        self.dummy1.flush()
        self.dummy2.write("54321" * 1000)
        self.dummy2.flush()

        #dummy3 is a valid FLAC file with one corrupted header byte
        f = open("flac-allframes.flac", "rb")
        data = f.read()
        f.close()
        self.dummy3.write(data[0:0x4] + chr(0xFF) + data[0x5:])
        self.dummy3.flush()

    @LIB_CORE
    def tearDown(self):
        self.dummy1.close()
        self.dummy2.close()
        #close dummy3 as well so its temp file is removed
        self.dummy3.close()

    @LIB_CORE
    def test_open(self):
        #ensure open on dummy file raises UnsupportedFile
        self.assertRaises(audiotools.UnsupportedFile,
                          audiotools.open,
                          self.dummy1.name)

        #ensure open on nonexistent file raises IOError
        self.assertRaises(IOError,
                          audiotools.open,
                          "/dev/null/foo")

        #ensure open on directory raises IOError
        self.assertRaises(IOError,
                          audiotools.open,
                          "/")

        #ensure open on unreadable file raises IOError
        os.chmod(self.dummy1.name, 0)
        try:
            self.assertRaises(IOError,
                              audiotools.open,
                              self.dummy1.name)
        finally:
            #0o600 spelling is valid in both Python 2.6+ and 3
            os.chmod(self.dummy1.name, 0o600)

        #a corrupted-but-recognized file raises InvalidFile
        self.assertRaises(audiotools.InvalidFile,
                          audiotools.open,
                          self.dummy3.name)
class Test_open_directory(unittest.TestCase):
    """unit tests for recursive audiotools.open_directory traversal"""

    @LIB_CORE
    def setUp(self):
        self.output_type = audiotools.FlacAudio
        self.suffix = "." + self.output_type.SUFFIX
        self.dir = tempfile.mkdtemp()

    def make_track(self, directory, track_number):
        """creates a 1-second FLAC track in the given directory"""

        track = self.output_type.from_pcm(
            os.path.join(directory, str(track_number) + self.suffix),
            BLANK_PCM_Reader(1))
        track.set_metadata(audiotools.MetaData(track_name=u"Track Name",
                                               track_number=track_number))
        return track

    @LIB_CORE
    def tearDown(self):
        import shutil

        shutil.rmtree(self.dir)

    @LIB_CORE
    def test_open_directory(self):
        """checks depth-first ordering of tracks in nested directories"""

        subdir1 = os.path.join(self.dir, "dir1")
        subdir2 = os.path.join(self.dir, "dir2")
        subdir3 = os.path.join(subdir1, "dir3")

        os.mkdir(subdir1)
        os.mkdir(subdir2)
        os.mkdir(subdir3)

        track0_1 = self.make_track(self.dir, 1)
        track0_2 = self.make_track(self.dir, 2)
        track0_3 = self.make_track(self.dir, 3)
        track1_1 = self.make_track(subdir1, 1)
        track1_2 = self.make_track(subdir1, 2)
        track1_3 = self.make_track(subdir1, 3)
        track2_1 = self.make_track(subdir2, 1)
        track2_2 = self.make_track(subdir2, 2)
        track2_3 = self.make_track(subdir2, 3)
        track3_1 = self.make_track(subdir3, 1)
        track3_2 = self.make_track(subdir3, 2)
        track3_3 = self.make_track(subdir3, 3)

        tracks = list(audiotools.open_directory(self.dir))
        #traversal is depth-first, so dir1's subdirectory dir3
        #comes before the sibling dir2
        self.assertEqual([t.filename for t in tracks],
                         [t.filename for t in
                          [track0_1, track0_2, track0_3,
                           track1_1, track1_2, track1_3,
                           track3_1, track3_2, track3_3,
                           track2_1, track2_2, track2_3]])
class Test_open_files(unittest.TestCase):
    """unit tests for audiotools.open_files filtering"""

    @LIB_CORE
    def setUp(self):
        self.output_type = audiotools.FlacAudio
        self.suffix = "." + self.output_type.SUFFIX
        self.dir = tempfile.mkdtemp()

    def make_track(self, directory, track_number):
        """creates a 1-second FLAC track in the given directory"""

        track = self.output_type.from_pcm(
            os.path.join(directory, str(track_number) + self.suffix),
            BLANK_PCM_Reader(1))
        track.set_metadata(audiotools.MetaData(track_name=u"Track Name",
                                               track_number=track_number))
        return track

    @LIB_CORE
    def tearDown(self):
        import shutil

        shutil.rmtree(self.dir)

    @LIB_CORE
    def test_open_files(self):
        """checks invalid files are dropped and order is preserved"""

        track1 = self.make_track(self.dir, 1)
        track2 = self.make_track(self.dir, 2)
        track3 = self.make_track(self.dir, 3)

        #a file with the right suffix but garbage contents
        dummy1_name = os.path.join(self.dir, "4" + self.suffix)
        dummy1 = open(dummy1_name, "wb")
        dummy1.write("Hello World")
        dummy1.close()

        tracks = list(audiotools.open_files([track1.filename, track2.filename,
                                             dummy1_name, track3.filename]))
        self.assertEqual([t.filename for t in tracks],
                         [t.filename for t in [track1, track2, track3]])
class Test_pcm_frame_cmp(unittest.TestCase):
    """unit tests for the audiotools.pcm_frame_cmp stream comparator"""

    @LIB_CORE
    def test_pcm_frame_cmp(self):
        """checks matches, mismatch positions and attribute mismatches"""

        #identical streams compare as None (no mismatch)
        self.assert_(audiotools.pcm_frame_cmp(
            test_streams.Sine16_Stereo(44100, 44100,
                                       441.0, 0.50,
                                       4410.0, 0.49, 1.0),
            test_streams.Sine16_Stereo(44100, 44100,
                                       441.0, 0.50,
                                       4410.0, 0.49, 1.0)) is None)

        #differing data mismatches at frame 0
        self.assertEqual(audiotools.pcm_frame_cmp(BLANK_PCM_Reader(1),
                                                  RANDOM_PCM_Reader(1)), 0)

        #any stream attribute mismatch also reports frame 0
        self.assertEqual(audiotools.pcm_frame_cmp(
            BLANK_PCM_Reader(1),
            BLANK_PCM_Reader(1, sample_rate=48000)), 0)
        self.assertEqual(audiotools.pcm_frame_cmp(
            BLANK_PCM_Reader(1),
            BLANK_PCM_Reader(1, channels=1)), 0)
        self.assertEqual(audiotools.pcm_frame_cmp(
            BLANK_PCM_Reader(1),
            BLANK_PCM_Reader(1, bits_per_sample=24)), 0)
        self.assertEqual(audiotools.pcm_frame_cmp(
            BLANK_PCM_Reader(1),
            BLANK_PCM_Reader(1, channel_mask=0x30)), 0)

        #a mismatch partway through reports that frame number
        self.assertEqual(audiotools.pcm_frame_cmp(
            BLANK_PCM_Reader(2),
            audiotools.PCMCat(iter([BLANK_PCM_Reader(1),
                                    RANDOM_PCM_Reader(1)]))), 44100)
class Test_pcm_split(unittest.TestCase):
    """unit tests for audiotools.pcm_split stream partitioning"""

    @LIB_CORE
    def test_pcm_split(self):
        """checks each sub-stream carries its requested frame count"""

        from itertools import izip

        #the per-chunk sizes in PCM frames (seconds * 44100)
        pcm_frames = [44100 * l for l in (5, 10, 15, 4, 16, 10)]

        for (sub_pcm, sub_frames) in izip(
                audiotools.pcm_split(BLANK_PCM_Reader(60), pcm_frames),
                pcm_frames):
            counter = FrameCounter(2, 16, 44100)
            audiotools.transfer_framelist_data(sub_pcm, counter.update)
            #int(counter) is whole seconds, so scale back to frames
            self.assertEqual(sub_frames, int(counter) * 44100)
class Test_str_width(unittest.TestCase):
    """unit tests for audiotools.str_width display-width measurement"""

    @LIB_CORE
    def test_str_width(self):
        #check a plain ASCII string
        self.assertEqual(audiotools.str_width(u"Foo"), 3)

        #check a Unicode string without combining characters
        self.assertEqual(audiotools.str_width(u"F\u00f3o"), 3)

        #check a Unicode string with combining characters
        self.assertEqual(audiotools.str_width(u"Fo\u0301o"), 3)

        #check an ANSI-escaped ASCII string
        self.assertEqual(audiotools.str_width(u"\x1b[1mFoo\x1b[0m"), 3)

        #check an ANSI-escaped Unicode string without combining characeters
        self.assertEqual(audiotools.str_width(u"\x1b[1mF\u00f3o\x1b[0m"), 3)

        #check an ANSI-escaped Unicode string with combining characters
        self.assertEqual(audiotools.str_width(u"\x1b[1mFo\u0301o\x1b[0m"), 3)
class TestFrameList(unittest.TestCase):
@classmethod
def Bits8(cls):
for i in xrange(0, 0xFF + 1):
yield chr(i)
@classmethod
def Bits16(cls):
for i in xrange(0, 0xFF + 1):
for j in xrange(0, 0xFF + 1):
yield chr(i) + chr(j)
@classmethod
def Bits24(cls):
for i in xrange(0, 0xFF + 1):
for j in xrange(0, 0xFF + 1):
for k in xrange(0, 0xFF + 1):
yield chr(i) + chr(j) + chr(k)
    @LIB_CORE
    def test_basics(self):
        """exercises FrameList construction, indexing,
        conversion and concatenation against known values"""

        import audiotools.pcm

        #the data argument must be a string
        self.assertRaises(TypeError,
                          audiotools.pcm.FrameList,
                          0, 2, 16, 0, 1)
        self.assertRaises(TypeError,
                          audiotools.pcm.FrameList,
                          [1, 2, 3], 2, 16, 0, 1)

        #string length must divide evenly into whole frames
        self.assertRaises(ValueError,
                          audiotools.pcm.FrameList,
                          "abc", 2, 16, 0, 1)
        self.assertRaises(ValueError,
                          audiotools.pcm.FrameList,
                          "abc", 4, 8, 0, 1)

        #bits-per-sample must be 8, 16 or 24
        self.assertRaises(ValueError,
                          audiotools.pcm.FrameList,
                          "abcd", 1, 15, 0, 1)

        #a 16-byte big-endian, signed stream:
        #2 channels * 16 bits-per-sample * 4 frames
        f = audiotools.pcm.FrameList("".join(map(chr, range(16))),
                                     2, 16, True, True)
        self.assertEqual(len(f), 8)
        self.assertEqual(f.channels, 2)
        self.assertEqual(f.frames, 4)
        self.assertEqual(f.bits_per_sample, 16)
        self.assertRaises(IndexError, f.__getitem__, 9)

        #frame() extracts one frame across all channels
        self.assertEqual(list(f.frame(0)),
                         [0x0001, 0x0203])
        self.assertEqual(list(f.frame(1)),
                         [0x0405, 0x0607])
        self.assertEqual(list(f.frame(2)),
                         [0x0809, 0x0A0B])
        self.assertEqual(list(f.frame(3)),
                         [0x0C0D, 0x0E0F])
        self.assertRaises(IndexError, f.frame, 4)
        self.assertRaises(IndexError, f.frame, -1)

        #channel() extracts one channel across all frames
        self.assertEqual(list(f.channel(0)),
                         [0x0001, 0x0405, 0x0809, 0x0C0D])
        self.assertEqual(list(f.channel(1)),
                         [0x0203, 0x0607, 0x0A0B, 0x0E0F])
        self.assertRaises(IndexError, f.channel, 2)
        self.assertRaises(IndexError, f.channel, -1)

        #from_list() passes signed values through unchanged
        for bps in [8, 16, 24]:
            self.assertEqual(list(audiotools.pcm.from_list(
                        range(-40, 40), 1, bps, True)),
                             range(-40, 40))

        #unsigned values are shifted down into signed range
        for bps in [8, 16, 24]:
            self.assertEqual(list(audiotools.pcm.from_list(
                        range((1 << (bps - 1)) - 40,
                              (1 << (bps - 1)) + 40), 1, bps, False)),
                             range(-40, 40))

        #the same round-trip holds for every channel count,
        #bits-per-sample and signedness, per channel
        for channels in range(1, 9):
            for bps in [8, 16, 24]:
                for signed in [True, False]:
                    if (signed):
                        l = [random.choice(range(-40, 40)) for i in
                             xrange(16 * channels)]
                    else:
                        l = [random.choice(range(0, 80)) for i in
                             xrange(16 * channels)]
                    f2 = audiotools.pcm.from_list(l, channels, bps, signed)
                    if (signed):
                        self.assertEqual(list(f2), l)
                        for channel in range(channels):
                            self.assertEqual(list(f2.channel(channel)),
                                             l[channel::channels])
                    else:
                        self.assertEqual(list(f2),
                                         [i - (1 << (bps - 1))
                                          for i in l])
                        for channel in range(channels):
                            self.assertEqual(list(f2.channel(channel)),
                                             [i - (1 << (bps - 1))
                                              for i in l[channel::channels]])

        #to_bytes() re-serializes in either endianness
        self.assertEqual(f.to_bytes(True, True),
                         '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f')
        self.assertEqual(f.to_bytes(False, True),
                         '\x01\x00\x03\x02\x05\x04\x07\x06\t\x08\x0b\n\r\x0c\x0f\x0e')
        #FIXME - check signed

        #reassembling f from its own frames or channels yields f
        self.assertEqual(list(f),
                         list(audiotools.pcm.from_frames([f.frame(0),
                                                          f.frame(1),
                                                          f.frame(2),
                                                          f.frame(3)])))
        self.assertEqual(list(f),
                         list(audiotools.pcm.from_channels([f.channel(0),
                                                            f.channel(1)])))
        self.assertEqual(list(audiotools.pcm.from_list(
                    [0x0001, 0x0203, 0x0405, 0x0607,
                     0x0809, 0x0A0B, 0x0C0D, 0x0E0F], 2, 16, True)),
                         list(f))

        #from_list() rejects a value count not divisible by channels
        self.assertRaises(ValueError,
                          audiotools.pcm.from_list,
                          [0x0001, 0x0203, 0x0405, 0x0607,
                           0x0809, 0x0A0B, 0x0C0D], 2, 16, True)

        #and rejects an invalid bits-per-sample
        self.assertRaises(ValueError,
                          audiotools.pcm.from_list,
                          [0x0001, 0x0203, 0x0405, 0x0607,
                           0x0809, 0x0A0B, 0x0C0D, 0x0E0F], 2, 15, True)

        #from_frames() requires FrameList objects...
        self.assertRaises(TypeError,
                          audiotools.pcm.from_frames,
                          [audiotools.pcm.from_list(range(2), 2, 16, False),
                           range(2)])

        #...of matching length...
        self.assertRaises(ValueError,
                          audiotools.pcm.from_frames,
                          [audiotools.pcm.from_list(range(2), 2, 16, False),
                           audiotools.pcm.from_list(range(4), 2, 16, False)])

        #...matching channel count...
        self.assertRaises(ValueError,
                          audiotools.pcm.from_frames,
                          [audiotools.pcm.from_list(range(2), 2, 16, False),
                           audiotools.pcm.from_list(range(2), 1, 16, False)])

        #...and matching bits-per-sample
        self.assertRaises(ValueError,
                          audiotools.pcm.from_frames,
                          [audiotools.pcm.from_list(range(2), 2, 16, False),
                           audiotools.pcm.from_list(range(2), 2, 8, False)])

        #a successful from_frames() concatenates frame-by-frame
        self.assertEqual(list(audiotools.pcm.from_frames(
                    [audiotools.pcm.from_list(range(2), 2, 16, True),
                     audiotools.pcm.from_list(range(2, 4), 2, 16, True)])),
                         range(4))

        #from_channels() applies the same type checks...
        self.assertRaises(TypeError,
                          audiotools.pcm.from_channels,
                          [audiotools.pcm.from_list(range(2), 1, 16, False),
                           range(2)])
        self.assertRaises(ValueError,
                          audiotools.pcm.from_channels,
                          [audiotools.pcm.from_list(range(1), 1, 16, False),
                           audiotools.pcm.from_list(range(2), 2, 16, False)])
        self.assertRaises(ValueError,
                          audiotools.pcm.from_channels,
                          [audiotools.pcm.from_list(range(2), 1, 16, False),
                           audiotools.pcm.from_list(range(3), 1, 16, False)])
        self.assertRaises(ValueError,
                          audiotools.pcm.from_channels,
                          [audiotools.pcm.from_list(range(2), 1, 16, False),
                           audiotools.pcm.from_list(range(2), 1, 8, False)])

        #a successful from_channels() interleaves the channels
        self.assertEqual(list(audiotools.pcm.from_channels(
                    [audiotools.pcm.from_list(range(2), 1, 16, True),
                     audiotools.pcm.from_list(range(2, 4), 1, 16, True)])),
                         [0, 2, 1, 3])

        #split() divides at a frame index, clamping out-of-range values
        self.assertRaises(IndexError, f.split, -1)

        (f1, f2) = f.split(2)
        self.assertEqual(list(f1),
                         [0x0001, 0x0203,
                          0x0405, 0x0607])
        self.assertEqual(list(f2),
                         [0x0809, 0x0A0B,
                          0x0C0D, 0x0E0F])

        (f1, f2) = f.split(0)
        self.assertEqual(list(f1),
                         [])
        self.assertEqual(list(f2),
                         [0x0001, 0x0203,
                          0x0405, 0x0607,
                          0x0809, 0x0A0B,
                          0x0C0D, 0x0E0F])

        (f1, f2) = f.split(20)
        self.assertEqual(list(f1),
                         [0x0001, 0x0203,
                          0x0405, 0x0607,
                          0x0809, 0x0A0B,
                          0x0C0D, 0x0E0F])
        self.assertEqual(list(f2),
                         [])

        #splitting and rejoining at any point reproduces f
        for i in xrange(f.frames):
            (f1, f2) = f.split(i)
            self.assertEqual(len(f1), i * f.channels)
            self.assertEqual(len(f2), (len(f) - (i * f.channels)))
            self.assertEqual(list(f1 + f2), list(f))

        #concatenation requires matching channels and bits-per-sample
        import operator

        f1 = audiotools.pcm.from_list(range(10), 2, 16, False)
        self.assertRaises(TypeError, operator.concat, f1, [1, 2, 3])
        f2 = audiotools.pcm.from_list(range(10, 20), 1, 16, False)
        self.assertRaises(ValueError, operator.concat, f1, f2)
        f2 = audiotools.pcm.from_list(range(10, 20), 2, 8, False)
        self.assertRaises(ValueError, operator.concat, f1, f2)

        #equality requires matching values, channels and bits-per-sample
        f1 = audiotools.pcm.from_list(range(10), 2, 16, False)
        self.assertEqual(f1, audiotools.pcm.from_list(range(10), 2, 16, False))
        self.assertNotEqual(f1, 10)
        self.assertNotEqual(f1, range(10))
        self.assertNotEqual(f1,
                            audiotools.pcm.from_list(range(10), 1, 16, False))
        self.assertNotEqual(f1,
                            audiotools.pcm.from_list(range(10), 2, 8, False))
        self.assertNotEqual(f1,
                            audiotools.pcm.from_list(range(10), 1, 8, False))
        self.assertNotEqual(f1,
                            audiotools.pcm.from_list(range(8), 2, 16, False))
        self.assertNotEqual(f1,
                            audiotools.pcm.from_list(range(12), 2, 8, False))
@LIB_CORE
def test_8bit_roundtrip(self):
import audiotools.pcm
unsigned_ints = range(0, 0xFF + 1)
signed_ints = range(-0x80, 0x7F + 1)
#unsigned, big-endian
self.assertEqual([i - (1 << 7) for i in unsigned_ints],
list(audiotools.pcm.FrameList(
struct.pack(">%dB" % (len(unsigned_ints)), *unsigned_ints),
1, 8, True, False)))
#unsigned, little-endian
self.assertEqual([i - (1 << 7) for i in unsigned_ints],
list(audiotools.pcm.FrameList(
struct.pack("<%dB" % (len(unsigned_ints)), *unsigned_ints),
1, 8, False, False)))
#signed, big-endian
self.assertEqual(signed_ints,
list(audiotools.pcm.FrameList(
struct.pack(">%db" % (len(signed_ints)), *signed_ints),
1, 8, True, True)))
#signed, little-endian
self.assertEqual(signed_ints,
list(audiotools.pcm.FrameList(
struct.pack("<%db" % (len(signed_ints)), *signed_ints),
1, 8, 0, 1)))
@LIB_CORE
def test_8bit_roundtrip_str(self):
import audiotools.pcm
s = "".join(TestFrameList.Bits8())
#big endian, unsigned
self.assertEqual(
audiotools.pcm.FrameList(s, 1, 8,
True, False).to_bytes(True, False), s)
#big-endian, signed
self.assertEqual(
audiotools.pcm.FrameList(s, 1, 8,
True, True).to_bytes(True, True), s)
#little-endian, unsigned
self.assertEqual(
audiotools.pcm.FrameList(s, 1, 8,
False, False).to_bytes(False, False), s)
#little-endian, signed
self.assertEqual(
audiotools.pcm.FrameList(s, 1, 8,
False, True).to_bytes(False, True), s)
@LIB_CORE
def test_16bit_roundtrip(self):
import audiotools.pcm
unsigned_ints = range(0, 0xFFFF + 1)
signed_ints = range(-0x8000, 0x7FFF + 1)
#unsigned, big-endian
self.assertEqual([i - (1 << 15) for i in unsigned_ints],
list(audiotools.pcm.FrameList(
struct.pack(">%dH" % (len(unsigned_ints)), *unsigned_ints),
1, 16, True, False)))
#unsigned, little-endian
self.assertEqual([i - (1 << 15) for i in unsigned_ints],
list(audiotools.pcm.FrameList(
struct.pack("<%dH" % (len(unsigned_ints)), *unsigned_ints),
1, 16, False, False)))
#signed, big-endian
self.assertEqual(signed_ints,
list(audiotools.pcm.FrameList(
struct.pack(">%dh" % (len(signed_ints)), *signed_ints),
1, 16, True, True)))
#signed, little-endian
self.assertEqual(signed_ints,
list(audiotools.pcm.FrameList(
struct.pack("<%dh" % (len(signed_ints)), *signed_ints),
1, 16, False, True)))
@LIB_CORE
def test_16bit_roundtrip_str(self):
import audiotools.pcm
s = "".join(TestFrameList.Bits16())
#big-endian, unsigned
self.assertEqual(
audiotools.pcm.FrameList(s, 1, 16,
True, False).to_bytes(True, False),
s,
"data mismatch converting UBInt16 through string")
#big-endian, signed
self.assertEqual(
audiotools.pcm.FrameList(s, 1, 16,
True, True).to_bytes(True, True),
s,
"data mismatch converting SBInt16 through string")
#little-endian, unsigned
self.assertEqual(
audiotools.pcm.FrameList(s, 1, 16,
False, False).to_bytes(False, False),
s,
"data mismatch converting ULInt16 through string")
#little-endian, signed
self.assertEqual(
audiotools.pcm.FrameList(s, 1, 16,
False, True).to_bytes(False, True),
s,
"data mismatch converting USInt16 through string")
@LIB_CORE
def test_24bit_roundtrip(self):
import audiotools.pcm
from audiotools.bitstream import BitstreamRecorder
#setting this higher than 1 means we only test a sample
#of the full 24-bit value range
#since testing the whole range takes a very, very long time
RANGE = 8
unsigned_ints_high = [r << 8 for r in xrange(0, 0xFFFF + 1)]
signed_ints_high = [r << 8 for r in xrange(-0x8000, 0x7FFF + 1)]
for low_bits in xrange(0, 0xFF + 1, RANGE):
unsigned_values = [high_bits | low_bits for high_bits in
unsigned_ints_high]
rec = BitstreamRecorder(0)
rec.build("24u" * len(unsigned_values), unsigned_values)
self.assertEqual([i - (1 << 23) for i in unsigned_values],
list(audiotools.pcm.FrameList(
rec.data(), 1, 24, True, False)))
rec = BitstreamRecorder(1)
rec.build("24u" * len(unsigned_values), unsigned_values)
self.assertEqual([i - (1 << 23) for i in unsigned_values],
list(audiotools.pcm.FrameList(
rec.data(), 1, 24, False, False)))
for low_bits in xrange(0, 0xFF + 1, RANGE):
if (high_bits < 0):
signed_values = [high_bits - low_bits for high_bits in
signed_ints_high]
else:
signed_values = [high_bits + low_bits for high_bits in
signed_ints_high]
rec = BitstreamRecorder(0)
rec.build("24s" * len(signed_values), signed_values)
self.assertEqual(signed_values,
list(audiotools.pcm.FrameList(
rec.data(), 1, 24, True, True)))
rec = BitstreamRecorder(1)
rec.build("24s" * len(signed_values), signed_values)
self.assertEqual(signed_values,
list(audiotools.pcm.FrameList(
rec.data(), 1, 24, False, True)))
@LIB_CORE
def test_24bit_roundtrip_str(self):
import audiotools.pcm
s = "".join(TestFrameList.Bits24())
#big-endian, unsigned
self.assertEqual(
audiotools.pcm.FrameList(s, 1, 24,
True, False).to_bytes(True, False), s)
#big-endian, signed
self.assertEqual(
audiotools.pcm.FrameList(s, 1, 24,
True, True).to_bytes(True, True), s)
#little-endian, unsigned
self.assertEqual(
audiotools.pcm.FrameList(s, 1, 24,
False, False).to_bytes(False, False), s)
#little-endian, signed
self.assertEqual(
audiotools.pcm.FrameList(s, 1, 24,
False, True).to_bytes(False, True), s)
@LIB_CORE
def test_conversion(self):
for format in audiotools.AVAILABLE_TYPES:
temp_track = tempfile.NamedTemporaryFile(suffix="." + format.SUFFIX)
try:
for sine_class in [test_streams.Sine8_Stereo,
test_streams.Sine16_Stereo,
test_streams.Sine24_Stereo]:
sine = sine_class(88200, 44100, 441.0, 0.50, 441.0, 0.49, 1.0)
try:
track = format.from_pcm(temp_track.name, sine)
except audiotools.UnsupportedBitsPerSample:
continue
if (track.lossless()):
md5sum = md5()
audiotools.transfer_framelist_data(track.to_pcm(),
md5sum.update)
self.assertEqual(md5sum.hexdigest(), sine.hexdigest(),
"MD5 mismatch for %s using %s" % \
(track.NAME, repr(sine)))
for new_format in audiotools.AVAILABLE_TYPES:
temp_track2 = tempfile.NamedTemporaryFile(suffix="." + format.SUFFIX)
try:
try:
track2 = new_format.from_pcm(temp_track2.name,
track.to_pcm())
if (track2.lossless()):
md5sum2 = md5()
audiotools.transfer_framelist_data(track2.to_pcm(),
md5sum2.update)
self.assertEqual(md5sum.hexdigest(), sine.hexdigest(),
"MD5 mismatch for converting %s from %s to %s" % \
(repr(sine), track.NAME, track2.NAME))
except audiotools.UnsupportedBitsPerSample:
continue
finally:
temp_track2.close()
finally:
temp_track.close()
@LIB_CORE
def test_errors(self):
#check list that's too large
self.assertRaises(ValueError,
audiotools.pcm.FloatFrameList,
[0.0] * 5, 2)
#check list that's too small
self.assertRaises(ValueError,
audiotools.pcm.FloatFrameList,
[0.0] * 3, 2)
#check channels <= 0
self.assertRaises(ValueError,
audiotools.pcm.FloatFrameList,
[0.0] * 4, 0)
self.assertRaises(ValueError,
audiotools.pcm.FloatFrameList,
[0.0] * 4, -1)
class TestFloatFrameList(unittest.TestCase):
    @LIB_CORE
    def test_basics(self):
        """exercises FloatFrameList construction, indexing,
        splitting and float<->int conversion against known values"""

        import audiotools.pcm

        #value count must divide evenly into whole frames
        self.assertRaises(ValueError,
                          audiotools.pcm.FloatFrameList,
                          [1.0, 2.0, 3.0], 2)

        #the data argument must be a list of floats
        self.assertRaises(TypeError,
                          audiotools.pcm.FloatFrameList,
                          0, 1)
        self.assertRaises(TypeError,
                          audiotools.pcm.FloatFrameList,
                          [1.0, 2.0, "a"], 1)

        #2 channels * 4 frames
        f = audiotools.pcm.FloatFrameList(map(float, range(8)), 2)
        self.assertEqual(len(f), 8)
        self.assertEqual(f.channels, 2)
        self.assertEqual(f.frames, 4)
        self.assertRaises(IndexError, f.__getitem__, 9)

        #frame() extracts one frame across all channels
        self.assertEqual(list(f.frame(0)),
                         [0.0, 1.0])
        self.assertEqual(list(f.frame(1)),
                         [2.0, 3.0])
        self.assertEqual(list(f.frame(2)),
                         [4.0, 5.0])
        self.assertEqual(list(f.frame(3)),
                         [6.0, 7.0])
        self.assertRaises(IndexError, f.frame, 4)
        self.assertRaises(IndexError, f.frame, -1)

        #channel() extracts one channel across all frames
        self.assertEqual(list(f.channel(0)),
                         [0.0, 2.0, 4.0, 6.0])
        self.assertEqual(list(f.channel(1)),
                         [1.0, 3.0, 5.0, 7.0])
        self.assertRaises(IndexError, f.channel, 2)
        self.assertRaises(IndexError, f.channel, -1)

        #reassembling f from its own frames or channels yields f
        self.assertEqual(list(f),
                         list(audiotools.pcm.from_float_frames([f.frame(0),
                                                                f.frame(1),
                                                                f.frame(2),
                                                                f.frame(3)])))
        self.assertEqual(list(f),
                         list(audiotools.pcm.from_float_channels([f.channel(0),
                                                                  f.channel(1)])))

        #FIXME - check from_frames
        #FIXME - check from_channels

        #split() divides at a frame index, clamping out-of-range values
        self.assertRaises(IndexError, f.split, -1)

        (f1, f2) = f.split(2)
        self.assertEqual(list(f1),
                         [0.0, 1.0,
                          2.0, 3.0])
        self.assertEqual(list(f2),
                         [4.0, 5.0,
                          6.0, 7.0])

        (f1, f2) = f.split(0)
        self.assertEqual(list(f1),
                         [])
        self.assertEqual(list(f2),
                         [0.0, 1.0,
                          2.0, 3.0,
                          4.0, 5.0,
                          6.0, 7.0])

        (f1, f2) = f.split(20)
        self.assertEqual(list(f1),
                         [0.0, 1.0,
                          2.0, 3.0,
                          4.0, 5.0,
                          6.0, 7.0])
        self.assertEqual(list(f2),
                         [])

        #splitting and rejoining at any point reproduces f
        for i in xrange(f.frames):
            (f1, f2) = f.split(i)
            self.assertEqual(len(f1), i * f.channels)
            self.assertEqual(len(f2), (len(f) - (i * f.channels)))
            self.assertEqual(list(f1 + f2), list(f))

        #concatenation requires another FloatFrameList
        import operator

        f1 = audiotools.pcm.FloatFrameList(map(float, range(10)), 2)
        self.assertRaises(TypeError, operator.concat, f1, [1, 2, 3])

        #check round-trip from float->int->float
        #NOTE(review): the "signed" loop variable is never used in the
        #body, so each bps case simply runs twice - possibly a leftover
        l = [float(i - 128) / (1 << 7) for i in range(0, 1 << 8)]
        for bps in [8, 16, 24]:
            for signed in [True, False]:
                self.assertEqual(
                    l,
                    list(audiotools.pcm.FloatFrameList(l, 1).to_int(bps).to_float()))

        #check round-trip from int->float->int
        for bps in [8, 16, 24]:
            l = range(0, 1 << bps, 4)
            self.assertEqual(
                [i - (1 << (bps - 1)) for i in l],
                list(audiotools.pcm.from_list(l, 1, bps, False).to_float().to_int(bps)))

            l = range(-(1 << (bps - 1)), (1 << (bps - 1)) - 1, 4)
            self.assertEqual(
                l,
                list(audiotools.pcm.from_list(l, 1, bps, True).to_float().to_int(bps)))
@LIB_CORE
def test_errors(self):
#check string that's too large
self.assertRaises(ValueError,
audiotools.pcm.FrameList,
chr(0) * 5, 2, 16, 1, 1)
#check string that's too small
self.assertRaises(ValueError,
audiotools.pcm.FrameList,
chr(0) * 3, 2, 16, 1, 1)
#check channels <= 0
self.assertRaises(ValueError,
audiotools.pcm.FrameList,
chr(0) * 4, 0, 16, 1, 1)
self.assertRaises(ValueError,
audiotools.pcm.FrameList,
chr(0) * 4, -1, 16, 1, 1)
#check bps != 8,16,24
for bps in [0, 7, 9, 15, 17, 23, 25, 64]:
self.assertRaises(ValueError,
audiotools.pcm.FrameList,
chr(0) * 4, 2, bps, 1, 1)
class __SimpleChunkReader__:
    """a minimal file-like object which returns one preset chunk
    per read() call, regardless of the size requested"""

    def __init__(self, chunks):
        #chunks : a list of strings, consumed one per read() call
        self.chunks = chunks
        self.position = 0

    def read(self, bytes):
        #the size argument is ignored; each call consumes a whole chunk
        if (len(self.chunks) > 0):
            chunk = self.chunks.pop(0)
            self.position += len(chunk)
            return chunk
        else:
            #exhausted readers return an empty string, like a real file
            return ""

    def tell(self):
        #total number of bytes handed out so far
        return self.position

    def close(self):
        pass
class ByteCounter:
    """tallies how many times its callback has been invoked,
    for verifying per-byte bitstream reader callbacks"""

    def __init__(self):
        self.bytes = 0

    def __int__(self):
        #the current tally
        return self.bytes

    def reset(self):
        #restart the tally from zero
        self.bytes = 0

    def callback(self, i):
        #the byte value itself is ignored; only the count matters
        self.bytes += 1
class Bitstream(unittest.TestCase):
    def __test_big_endian_reader__(self, reader, table):
        """exercises every BitstreamReader read method in big-endian mode

        reader - a fresh reader; callers feed it the 4 byte stream
                 0xB1 0xED 0x3B 0xC1
        table  - a big-endian HuffmanTree built over the same stream

        leaves the reader rewound to its starting position"""

        #check the bitstream reader
        #against some known big-endian values

        reader.mark()

        #fixed-size unsigned reads
        self.assertEqual(reader.read(2), 0x2)
        self.assertEqual(reader.read(3), 0x6)
        self.assertEqual(reader.read(5), 0x07)
        self.assertEqual(reader.read(3), 0x5)
        self.assertEqual(reader.read(19), 0x53BC1)

        #the 64-bit variants must return the same values
        reader.rewind()
        self.assertEqual(reader.read64(2), 0x2)
        self.assertEqual(reader.read64(3), 0x6)
        self.assertEqual(reader.read64(5), 0x07)
        self.assertEqual(reader.read64(3), 0x5)
        self.assertEqual(reader.read64(19), 0x53BC1)

        #skip() advances by the same amounts as read()
        reader.rewind()
        self.assertEqual(reader.read(2), 0x2)
        reader.skip(3)
        self.assertEqual(reader.read(5), 0x07)
        reader.skip(3)
        self.assertEqual(reader.read(19), 0x53BC1)

        #unread() pushes a single bit back onto the stream
        reader.rewind()
        self.assertEqual(reader.read(1), 1)
        bit = reader.read(1)
        self.assertEqual(bit, 0)
        reader.unread(bit)
        self.assertEqual(reader.read(2), 1)
        reader.byte_align()

        #the unread bit need not match what was actually read
        reader.rewind()
        self.assertEqual(reader.read(8), 0xB1)
        reader.unread(0)
        self.assertEqual(reader.read(1), 0)
        reader.unread(1)
        self.assertEqual(reader.read(1), 1)

        #signed (two's complement) reads
        reader.rewind()
        self.assertEqual(reader.read_signed(2), -2)
        self.assertEqual(reader.read_signed(3), -2)
        self.assertEqual(reader.read_signed(5), 7)
        self.assertEqual(reader.read_signed(3), -3)
        self.assertEqual(reader.read_signed(19), -181311)

        #and their 64-bit variants
        reader.rewind()
        self.assertEqual(reader.read_signed64(2), -2)
        self.assertEqual(reader.read_signed64(3), -2)
        self.assertEqual(reader.read_signed64(5), 7)
        self.assertEqual(reader.read_signed64(3), -3)
        self.assertEqual(reader.read_signed64(19), -181311)

        #unary counts with a 0 stop bit, then with a 1 stop bit
        reader.rewind()
        self.assertEqual(reader.unary(0), 1)
        self.assertEqual(reader.unary(0), 2)
        self.assertEqual(reader.unary(0), 0)
        self.assertEqual(reader.unary(0), 0)
        self.assertEqual(reader.unary(0), 4)
        reader.rewind()
        self.assertEqual(reader.unary(1), 0)
        self.assertEqual(reader.unary(1), 1)
        self.assertEqual(reader.unary(1), 0)
        self.assertEqual(reader.unary(1), 3)
        self.assertEqual(reader.unary(1), 0)

        #limited unary returns None once the limit is reached
        reader.rewind()
        self.assertEqual(reader.limited_unary(0, 2), 1)
        self.assertEqual(reader.limited_unary(0, 2), None)
        reader.rewind()
        self.assertEqual(reader.limited_unary(1, 2), 0)
        self.assertEqual(reader.limited_unary(1, 2), 1)
        self.assertEqual(reader.limited_unary(1, 2), 0)
        self.assertEqual(reader.limited_unary(1, 2), None)

        #Huffman codes from the supplied table
        reader.rewind()
        self.assertEqual(reader.read_huffman_code(table), 1)
        self.assertEqual(reader.read_huffman_code(table), 0)
        self.assertEqual(reader.read_huffman_code(table), 4)
        self.assertEqual(reader.read_huffman_code(table), 0)
        self.assertEqual(reader.read_huffman_code(table), 0)
        self.assertEqual(reader.read_huffman_code(table), 2)
        self.assertEqual(reader.read_huffman_code(table), 1)
        self.assertEqual(reader.read_huffman_code(table), 1)
        self.assertEqual(reader.read_huffman_code(table), 2)
        self.assertEqual(reader.read_huffman_code(table), 0)
        self.assertEqual(reader.read_huffman_code(table), 2)
        self.assertEqual(reader.read_huffman_code(table), 0)
        self.assertEqual(reader.read_huffman_code(table), 1)
        self.assertEqual(reader.read_huffman_code(table), 4)
        self.assertEqual(reader.read_huffman_code(table), 2)

        #byte_align() discards the remainder of a partially-read byte;
        #aligning an already-aligned stream is a no-op
        reader.rewind()
        self.assertEqual(reader.read(3), 5)
        reader.byte_align()
        self.assertEqual(reader.read(3), 7)
        reader.byte_align()
        reader.byte_align()
        self.assertEqual(reader.read(8), 59)
        reader.byte_align()
        self.assertEqual(reader.read(4), 12)

        #read_bytes() returns raw strings,
        #including from unaligned positions
        reader.rewind()
        self.assertEqual(reader.read_bytes(2), "\xB1\xED")
        reader.rewind()
        self.assertEqual(reader.read(4), 11)
        self.assertEqual(reader.read_bytes(2), "\x1E\xD3")

        #NOTE(review): these values are consistent with set_endianness()
        #discarding any partially-read byte - confirm against bitstream docs
        reader.rewind()
        self.assertEqual(reader.read(3), 5)
        reader.set_endianness(1)
        self.assertEqual(reader.read(3), 5)
        reader.set_endianness(0)
        self.assertEqual(reader.read(4), 3)
        reader.set_endianness(0)
        self.assertEqual(reader.read(4), 12)

        #marks may be placed mid-stream and rewound to repeatedly
        reader.rewind()
        reader.mark()
        self.assertEqual(reader.read(4), 0xB)
        reader.rewind()
        self.assertEqual(reader.read(8), 0xB1)
        reader.rewind()
        self.assertEqual(reader.read(12), 0xB1E)
        reader.unmark()
        reader.mark()
        self.assertEqual(reader.read(4), 0xD)
        reader.rewind()
        self.assertEqual(reader.read(8), 0xD3)
        reader.rewind()
        self.assertEqual(reader.read(12), 0xD3B)
        reader.unmark()

        #return the reader to its starting position
        reader.rewind()
        reader.unmark()
    def __test_little_endian_reader__(self, reader, table):
        """exercises every BitstreamReader read method in little-endian mode

        reader - a fresh reader; callers feed it the 4 byte stream
                 0xB1 0xED 0x3B 0xC1
        table  - a little-endian HuffmanTree built over the same stream

        leaves the reader rewound to its starting position"""

        #check the bitstream reader
        #against some known little-endian values

        reader.mark()

        #fixed-size unsigned reads
        self.assertEqual(reader.read(2), 0x1)
        self.assertEqual(reader.read(3), 0x4)
        self.assertEqual(reader.read(5), 0x0D)
        self.assertEqual(reader.read(3), 0x3)
        self.assertEqual(reader.read(19), 0x609DF)

        #the 64-bit variants must return the same values
        reader.rewind()
        self.assertEqual(reader.read64(2), 1)
        self.assertEqual(reader.read64(3), 4)
        self.assertEqual(reader.read64(5), 13)
        self.assertEqual(reader.read64(3), 3)
        self.assertEqual(reader.read64(19), 395743)

        #skip() advances by the same amounts as read()
        reader.rewind()
        self.assertEqual(reader.read(2), 0x1)
        reader.skip(3)
        self.assertEqual(reader.read(5), 0x0D)
        reader.skip(3)
        self.assertEqual(reader.read(19), 0x609DF)

        #unread() pushes a single bit back onto the stream
        reader.rewind()
        self.assertEqual(reader.read(1), 1)
        bit = reader.read(1)
        self.assertEqual(bit, 0)
        reader.unread(bit)
        self.assertEqual(reader.read(4), 8)
        reader.byte_align()

        #the unread bit need not match what was actually read
        reader.rewind()
        self.assertEqual(reader.read(8), 0xB1)
        reader.unread(0)
        self.assertEqual(reader.read(1), 0)
        reader.unread(1)
        self.assertEqual(reader.read(1), 1)

        #signed (two's complement) reads
        reader.rewind()
        self.assertEqual(reader.read_signed(2), 1)
        self.assertEqual(reader.read_signed(3), -4)
        self.assertEqual(reader.read_signed(5), 13)
        self.assertEqual(reader.read_signed(3), 3)
        self.assertEqual(reader.read_signed(19), -128545)

        #and their 64-bit variants
        reader.rewind()
        self.assertEqual(reader.read_signed64(2), 1)
        self.assertEqual(reader.read_signed64(3), -4)
        self.assertEqual(reader.read_signed64(5), 13)
        self.assertEqual(reader.read_signed64(3), 3)
        self.assertEqual(reader.read_signed64(19), -128545)

        #unary counts with a 0 stop bit, then with a 1 stop bit
        reader.rewind()
        self.assertEqual(reader.unary(0), 1)
        self.assertEqual(reader.unary(0), 0)
        self.assertEqual(reader.unary(0), 0)
        self.assertEqual(reader.unary(0), 2)
        self.assertEqual(reader.unary(0), 2)
        reader.rewind()
        self.assertEqual(reader.unary(1), 0)
        self.assertEqual(reader.unary(1), 3)
        self.assertEqual(reader.unary(1), 0)
        self.assertEqual(reader.unary(1), 1)
        self.assertEqual(reader.unary(1), 0)

        #limited unary returns None once the limit is reached
        reader.rewind()
        self.assertEqual(reader.limited_unary(0, 2), 1)
        self.assertEqual(reader.limited_unary(0, 2), 0)
        self.assertEqual(reader.limited_unary(0, 2), 0)
        self.assertEqual(reader.limited_unary(0, 2), None)

        #Huffman codes from the supplied table
        reader.rewind()
        self.assertEqual(reader.read_huffman_code(table), 1)
        self.assertEqual(reader.read_huffman_code(table), 3)
        self.assertEqual(reader.read_huffman_code(table), 1)
        self.assertEqual(reader.read_huffman_code(table), 0)
        self.assertEqual(reader.read_huffman_code(table), 2)
        self.assertEqual(reader.read_huffman_code(table), 1)
        self.assertEqual(reader.read_huffman_code(table), 0)
        self.assertEqual(reader.read_huffman_code(table), 0)
        self.assertEqual(reader.read_huffman_code(table), 1)
        self.assertEqual(reader.read_huffman_code(table), 0)
        self.assertEqual(reader.read_huffman_code(table), 1)
        self.assertEqual(reader.read_huffman_code(table), 2)
        self.assertEqual(reader.read_huffman_code(table), 4)
        self.assertEqual(reader.read_huffman_code(table), 3)

        #read_bytes() returns raw strings,
        #including from unaligned positions
        reader.rewind()
        self.assertEqual(reader.read_bytes(2), "\xB1\xED")
        reader.rewind()
        self.assertEqual(reader.read(4), 1)
        self.assertEqual(reader.read_bytes(2), "\xDB\xBE")

        #byte_align() discards the remainder of a partially-read byte;
        #aligning an already-aligned stream is a no-op
        reader.rewind()
        self.assertEqual(reader.read(3), 1)
        reader.byte_align()
        self.assertEqual(reader.read(3), 5)
        reader.byte_align()
        reader.byte_align()
        self.assertEqual(reader.read(8), 59)
        reader.byte_align()
        self.assertEqual(reader.read(4), 1)

        #NOTE(review): these values are consistent with set_endianness()
        #discarding any partially-read byte - confirm against bitstream docs
        reader.rewind()
        self.assertEqual(reader.read(3), 1)
        reader.set_endianness(0)
        self.assertEqual(reader.read(3), 7)
        reader.set_endianness(1)
        self.assertEqual(reader.read(4), 11)
        reader.set_endianness(1)
        self.assertEqual(reader.read(4), 1)

        #limited unary with a 1 stop bit
        reader.rewind()
        self.assertEqual(reader.limited_unary(1, 2), 0)
        self.assertEqual(reader.limited_unary(1, 2), None)

        #marks may be placed mid-stream and rewound to repeatedly
        reader.rewind()
        reader.mark()
        self.assertEqual(reader.read(4), 0x1)
        reader.rewind()
        self.assertEqual(reader.read(8), 0xB1)
        reader.rewind()
        self.assertEqual(reader.read(12), 0xDB1)
        reader.unmark()
        reader.mark()
        self.assertEqual(reader.read(4), 0xE)
        reader.rewind()
        self.assertEqual(reader.read(8), 0xBE)
        reader.rewind()
        self.assertEqual(reader.read(12), 0x3BE)
        reader.unmark()

        #return the reader to its starting position
        reader.rewind()
        reader.unmark()
    def __test_try__(self, reader, table):
        """verifies that every read method raises IOError
        when asked to read past the end of the stream,
        and that a rewind recovers from the failed read

        reader - a fresh reader over a 32-bit (4 byte) stream
        table  - a HuffmanTree matching the reader's endianness

        leaves the reader rewound to its starting position"""

        reader.mark()

        #bounce to the very end of the stream
        reader.skip(31)
        reader.mark()
        self.assertEqual(reader.read(1), 1)
        reader.rewind()

        #then test all the read methods to ensure they trigger br_abort
        #in the case of unary/Huffman, the stream ends on a "1" bit
        #whether reading it big-endian or little-endian
        self.assertRaises(IOError, reader.read, 2)
        reader.rewind()
        self.assertRaises(IOError, reader.read64, 2)
        reader.rewind()
        self.assertRaises(IOError, reader.read_signed, 2)
        reader.rewind()
        self.assertRaises(IOError, reader.read_signed64, 2)
        reader.rewind()
        self.assertRaises(IOError, reader.skip, 2)
        reader.rewind()
        self.assertRaises(IOError, reader.unary, 0)
        reader.rewind()
        #the final "1" bit satisfies one unary(1) read before EOF
        self.assertEqual(reader.unary(1), 0)
        self.assertRaises(IOError, reader.unary, 1)
        reader.rewind()
        self.assertRaises(IOError, reader.limited_unary, 0, 3)
        reader.rewind()
        self.assertEqual(reader.limited_unary(1, 3), 0)
        self.assertRaises(IOError, reader.limited_unary, 1, 3)
        reader.rewind()
        self.assertRaises(IOError, reader.read_huffman_code, table)
        reader.rewind()
        self.assertRaises(IOError, reader.read_bytes, 2)
        reader.rewind()
        self.assertRaises(IOError, reader.substream, 1)
        reader.unmark()

        #return the reader to its starting position
        reader.rewind()
        reader.unmark()
    def __test_callbacks_reader__(self,
                                  reader,
                                  unary_0_reads,
                                  unary_1_reads,
                                  table,
                                  huffman_code_count):
        """verifies that byte callbacks fire once per byte consumed,
        whichever read method consumes it

        reader        - a fresh reader; one full pass == 4 callback hits
        unary_0_reads - number of unary(0) reads that exhaust the stream
        unary_1_reads - number of unary(1) reads that exhaust the stream
        table         - HuffmanTree matching the reader's endianness
        huffman_code_count - number of Huffman reads that exhaust the stream

        leaves the reader rewound with all callbacks removed"""

        counter = ByteCounter()
        reader.mark()
        reader.add_callback(counter.callback)

        #a single callback
        counter.reset()
        for i in xrange(8):
            reader.read(4)
        self.assertEqual(int(counter), 4)
        reader.rewind()

        #calling callbacks directly
        counter.reset()
        for i in xrange(20):
            reader.call_callbacks(0)
        self.assertEqual(int(counter), 20)

        #two callbacks
        counter.reset()
        reader.add_callback(counter.callback)
        for i in xrange(8):
            reader.read(4)
        self.assertEqual(int(counter), 8)
        reader.pop_callback()
        reader.rewind()

        #temporarily suspending the callback
        counter.reset()
        reader.read(8)
        self.assertEqual(int(counter), 1)
        callback = reader.pop_callback()
        reader.read(8)
        reader.read(8)
        reader.add_callback(counter.callback)
        reader.read(8)
        self.assertEqual(int(counter), 2)
        reader.rewind()

        #temporarily adding two callbacks
        counter.reset()
        reader.read(8)
        self.assertEqual(int(counter), 1)
        reader.add_callback(counter.callback)
        reader.read(8)
        reader.read(8)
        reader.pop_callback()
        reader.read(8)
        self.assertEqual(int(counter), 6)
        reader.rewind()

        #read_signed
        counter.reset()
        for i in xrange(8):
            reader.read_signed(4)
        self.assertEqual(int(counter), 4)
        reader.rewind()

        #read_64
        counter.reset()
        for i in xrange(8):
            reader.read64(4)
        self.assertEqual(int(counter), 4)
        reader.rewind()

        #skip
        counter.reset()
        for i in xrange(8):
            reader.skip(4)
        self.assertEqual(int(counter), 4)
        reader.rewind()

        #read_unary
        counter.reset()
        for i in xrange(unary_0_reads):
            reader.unary(0)
        self.assertEqual(int(counter), 4)
        counter.reset()
        reader.rewind()
        for i in xrange(unary_1_reads):
            reader.unary(1)
        self.assertEqual(int(counter), 4)
        reader.rewind()

        #read_limited_unary
        counter.reset()
        for i in xrange(unary_0_reads):
            reader.limited_unary(0, 6)
        self.assertEqual(int(counter), 4)
        counter.reset()
        reader.rewind()
        for i in xrange(unary_1_reads):
            reader.limited_unary(1, 6)
        self.assertEqual(int(counter), 4)
        reader.rewind()

        #read_huffman_code
        counter.reset()
        for i in xrange(huffman_code_count):
            reader.read_huffman_code(table)
        self.assertEqual(int(counter), 4)
        reader.rewind()

        #read_bytes
        counter.reset()
        reader.read_bytes(2)
        reader.read_bytes(2)
        self.assertEqual(int(counter), 4)
        reader.rewind()

        #remove our callback and return to the starting position
        reader.pop_callback()
        reader.unmark()
@LIB_BITSTREAM
def test_init_error(self):
from audiotools.bitstream import BitstreamAccumulator
from audiotools.bitstream import BitstreamReader
from audiotools.bitstream import BitstreamRecorder
from audiotools.bitstream import BitstreamWriter
self.assertRaises(TypeError, BitstreamAccumulator)
self.assertRaises(TypeError, BitstreamAccumulator, None)
self.assertRaises(TypeError, BitstreamRecorder)
self.assertRaises(TypeError, BitstreamRecorder, None)
self.assertRaises(TypeError, BitstreamWriter)
self.assertRaises(TypeError, BitstreamReader)
@LIB_BITSTREAM
def test_simple_reader(self):
from audiotools.bitstream import BitstreamReader, HuffmanTree
temp = tempfile.TemporaryFile()
try:
temp.write(chr(0xB1))
temp.write(chr(0xED))
temp.write(chr(0x3B))
temp.write(chr(0xC1))
temp.seek(0, 0)
#test a big-endian stream built from a file
reader = BitstreamReader(temp, 0)
table_be = HuffmanTree([[1, 1], 0,
[1, 0], 1,
[0, 1], 2,
[0, 0, 1], 3,
[0, 0, 0], 4], 0)
self.__test_big_endian_reader__(reader, table_be)
self.__test_try__(reader, table_be)
self.__test_callbacks_reader__(reader, 14, 18, table_be, 14)
temp.seek(0, 0)
#test a little-endian stream built from a file
reader = BitstreamReader(temp, 1)
table_le = HuffmanTree([[1, 1], 0,
[1, 0], 1,
[0, 1], 2,
[0, 0, 1], 3,
[0, 0, 0], 4], 1)
self.__test_little_endian_reader__(reader, table_le)
self.__test_try__(reader, table_le)
self.__test_callbacks_reader__(reader, 14, 18, table_le, 13)
#pad the stream with some additional data at both ends
temp.seek(0, 0)
temp.write(chr(0xFF))
temp.write(chr(0xFF))
temp.write(chr(0xB1))
temp.write(chr(0xED))
temp.write(chr(0x3B))
temp.write(chr(0xC1))
temp.write(chr(0xFF))
temp.write(chr(0xFF))
temp.flush()
temp.seek(0, 0)
reader = BitstreamReader(temp, 0)
reader.mark()
#check a big-endian substream built from a file
reader.skip(16)
subreader = reader.substream(4)
self.__test_big_endian_reader__(subreader, table_be)
self.__test_try__(subreader, table_be)
self.__test_callbacks_reader__(subreader, 14, 18, table_be, 13)
#check a big-endian substream built from another substream
reader.rewind()
reader.skip(8)
subreader1 = reader.substream(6)
subreader1.skip(8)
subreader2 = subreader.substream(4)
self.__test_big_endian_reader__(subreader2, table_be)
self.__test_try__(subreader2, table_be)
self.__test_callbacks_reader__(subreader2, 14, 18, table_be, 13)
reader.unmark()
temp.seek(0, 0)
reader = BitstreamReader(temp, 1)
reader.mark()
#check a little-endian substream built from a file
reader.skip(16)
subreader = reader.substream(4)
self.__test_little_endian_reader__(subreader, table_le)
self.__test_try__(subreader, table_le)
self.__test_callbacks_reader__(subreader, 14, 18, table_le, 13)
#check a little-endian substream built from another substream
reader.rewind()
reader.skip(8)
subreader1 = reader.substream(6)
subreader1.skip(8)
subreader2 = subreader.substream(4)
self.__test_little_endian_reader__(subreader2, table_le)
self.__test_try__(subreader2, table_le)
self.__test_callbacks_reader__(subreader2, 14, 18, table_le, 13)
reader.unmark()
#test the writer functions with each endianness
self.__test_writer__(0)
self.__test_writer__(1)
finally:
temp.close()
    def __test_edge_reader_be__(self, reader):
        """read 48 bytes of boundary-value data as big-endian integers

        'reader' is expected to deliver, packed big-endian:
        0, 2**32 - 1, -(2**31), 2**31 - 1 as 32-bit fields followed by
        0, 2**64 - 1, -(2**63), 2**63 - 1 as 64-bit fields;
        the same data is re-read four ways (unsigned, signed,
        and both flavors again via parse()) using mark()/rewind()
        """
        reader.mark()
        #try the unsigned 32 and 64 bit values
        reader.rewind()
        self.assertEqual(reader.read(32), 0)
        self.assertEqual(reader.read(32), 4294967295)
        self.assertEqual(reader.read(32), 2147483648)
        self.assertEqual(reader.read(32), 2147483647)
        self.assertEqual(reader.read64(64), 0)
        self.assertEqual(reader.read64(64), 0xFFFFFFFFFFFFFFFFL)
        self.assertEqual(reader.read64(64), 9223372036854775808L)
        self.assertEqual(reader.read64(64), 9223372036854775807L)
        #try the signed 32 and 64 bit values
        reader.rewind()
        self.assertEqual(reader.read_signed(32), 0)
        self.assertEqual(reader.read_signed(32), -1)
        self.assertEqual(reader.read_signed(32), -2147483648)
        self.assertEqual(reader.read_signed(32), 2147483647)
        self.assertEqual(reader.read_signed64(64), 0)
        self.assertEqual(reader.read_signed64(64), -1)
        self.assertEqual(reader.read_signed64(64), -9223372036854775808L)
        self.assertEqual(reader.read_signed64(64), 9223372036854775807L)
        #try the unsigned values via parse()
        reader.rewind()
        (u_val_1,
         u_val_2,
         u_val_3,
         u_val_4,
         u_val64_1,
         u_val64_2,
         u_val64_3,
         u_val64_4) = reader.parse("32u 32u 32u 32u 64U 64U 64U 64U")
        self.assertEqual(u_val_1, 0)
        self.assertEqual(u_val_2, 4294967295)
        self.assertEqual(u_val_3, 2147483648)
        self.assertEqual(u_val_4, 2147483647)
        self.assertEqual(u_val64_1, 0)
        self.assertEqual(u_val64_2, 0xFFFFFFFFFFFFFFFFL)
        self.assertEqual(u_val64_3, 9223372036854775808L)
        self.assertEqual(u_val64_4, 9223372036854775807L)
        #try the signed values via parse()
        reader.rewind()
        (s_val_1,
         s_val_2,
         s_val_3,
         s_val_4,
         s_val64_1,
         s_val64_2,
         s_val64_3,
         s_val64_4) = reader.parse("32s 32s 32s 32s 64S 64S 64S 64S")
        self.assertEqual(s_val_1, 0)
        self.assertEqual(s_val_2, -1)
        self.assertEqual(s_val_3, -2147483648)
        self.assertEqual(s_val_4, 2147483647)
        self.assertEqual(s_val64_1, 0)
        self.assertEqual(s_val64_2, -1)
        self.assertEqual(s_val64_3, -9223372036854775808L)
        self.assertEqual(s_val64_4, 9223372036854775807L)
        reader.unmark()
    def __test_edge_reader_le__(self, reader):
        """read 48 bytes of boundary-value data as little-endian integers

        same value set as __test_edge_reader_be__ but packed little-endian;
        NOTE(review): unlike the BE variant, the first section has no
        rewind() after mark() - equivalent only because mark() is taken
        at the stream start
        """
        reader.mark()
        #try the unsigned 32 and 64 bit values
        self.assertEqual(reader.read(32), 0)
        self.assertEqual(reader.read(32), 4294967295)
        self.assertEqual(reader.read(32), 2147483648)
        self.assertEqual(reader.read(32), 2147483647)
        self.assertEqual(reader.read64(64), 0)
        self.assertEqual(reader.read64(64), 0xFFFFFFFFFFFFFFFFL)
        self.assertEqual(reader.read64(64), 9223372036854775808L)
        self.assertEqual(reader.read64(64), 9223372036854775807L)
        #try the signed 32 and 64 bit values
        reader.rewind()
        self.assertEqual(reader.read_signed(32), 0)
        self.assertEqual(reader.read_signed(32), -1)
        self.assertEqual(reader.read_signed(32), -2147483648)
        self.assertEqual(reader.read_signed(32), 2147483647)
        self.assertEqual(reader.read_signed64(64), 0)
        self.assertEqual(reader.read_signed64(64), -1)
        self.assertEqual(reader.read_signed64(64), -9223372036854775808L)
        self.assertEqual(reader.read_signed64(64), 9223372036854775807L)
        #try the unsigned values via parse()
        reader.rewind()
        (u_val_1,
         u_val_2,
         u_val_3,
         u_val_4,
         u_val64_1,
         u_val64_2,
         u_val64_3,
         u_val64_4) = reader.parse("32u 32u 32u 32u 64U 64U 64U 64U")
        self.assertEqual(u_val_1, 0)
        self.assertEqual(u_val_2, 4294967295)
        self.assertEqual(u_val_3, 2147483648)
        self.assertEqual(u_val_4, 2147483647)
        self.assertEqual(u_val64_1, 0)
        self.assertEqual(u_val64_2, 0xFFFFFFFFFFFFFFFFL)
        self.assertEqual(u_val64_3, 9223372036854775808L)
        self.assertEqual(u_val64_4, 9223372036854775807L)
        #try the signed values via parse()
        reader.rewind()
        (s_val_1,
         s_val_2,
         s_val_3,
         s_val_4,
         s_val64_1,
         s_val64_2,
         s_val64_3,
         s_val64_4) = reader.parse("32s 32s 32s 32s 64S 64S 64S 64S")
        self.assertEqual(s_val_1, 0)
        self.assertEqual(s_val_2, -1)
        self.assertEqual(s_val_3, -2147483648)
        self.assertEqual(s_val_4, 2147483647)
        self.assertEqual(s_val64_1, 0)
        self.assertEqual(s_val64_2, -1)
        self.assertEqual(s_val64_3, -9223372036854775808L)
        self.assertEqual(s_val64_4, 9223372036854775807L)
        reader.unmark()
    def __test_edge_writer__(self, get_writer, validate_writer):
        """write 32/64-bit boundary values and validate the output

        get_writer      - no-arg callable returning (writer, temp_file)
        validate_writer - callable taking (writer, temp_file) which checks
                          the generated output and closes the file

        a fresh writer is fetched for each of the four passes:
        unsigned writes, signed writes, unsigned build(), signed build()
        """
        #try the unsigned 32 and 64 bit values
        (writer, temp) = get_writer()
        writer.write(32, 0)
        writer.write(32, 4294967295)
        writer.write(32, 2147483648)
        writer.write(32, 2147483647)
        writer.write64(64, 0)
        writer.write64(64, 0xFFFFFFFFFFFFFFFFL)
        writer.write64(64, 9223372036854775808L)
        writer.write64(64, 9223372036854775807L)
        validate_writer(writer, temp)
        #try the signed 32 and 64 bit values
        (writer, temp) = get_writer()
        writer.write_signed(32, 0)
        writer.write_signed(32, -1)
        writer.write_signed(32, -2147483648)
        writer.write_signed(32, 2147483647)
        writer.write_signed64(64, 0)
        writer.write_signed64(64, -1)
        writer.write_signed64(64, -9223372036854775808L)
        writer.write_signed64(64, 9223372036854775807L)
        validate_writer(writer, temp)
        #try the unsigned values via build()
        (writer, temp) = get_writer()
        u_val_1 = 0
        u_val_2 = 4294967295
        u_val_3 = 2147483648
        u_val_4 = 2147483647
        u_val64_1 = 0
        u_val64_2 = 0xFFFFFFFFFFFFFFFFL
        u_val64_3 = 9223372036854775808L
        u_val64_4 = 9223372036854775807L
        writer.build("32u 32u 32u 32u 64U 64U 64U 64U",
                     [u_val_1, u_val_2, u_val_3, u_val_4,
                      u_val64_1, u_val64_2, u_val64_3, u_val64_4])
        validate_writer(writer, temp)
        #try the signed values via build()
        (writer, temp) = get_writer()
        s_val_1 = 0
        s_val_2 = -1
        s_val_3 = -2147483648
        s_val_4 = 2147483647
        s_val64_1 = 0
        s_val64_2 = -1
        s_val64_3 = -9223372036854775808L
        s_val64_4 = 9223372036854775807L
        writer.build("32s 32s 32s 32s 64S 64S 64S 64S",
                     [s_val_1, s_val_2, s_val_3, s_val_4,
                      s_val64_1, s_val64_2, s_val64_3, s_val64_4])
        validate_writer(writer, temp)
def __get_edge_writer_be__(self):
from audiotools.bitstream import BitstreamWriter
temp_file = tempfile.NamedTemporaryFile()
return (BitstreamWriter(open(temp_file.name, "wb"), 0), temp_file)
def __validate_edge_writer_be__(self, writer, temp_file):
writer.close()
self.assertEqual(open(temp_file.name, "rb").read(),
"".join(map(chr,
[0, 0, 0, 0, 255, 255, 255, 255,
128, 0, 0, 0, 127, 255, 255, 255,
0, 0, 0, 0, 0, 0, 0, 0,
255, 255, 255, 255, 255, 255, 255, 255,
128, 0, 0, 0, 0, 0, 0, 0,
127, 255, 255, 255, 255, 255, 255, 255])))
temp_file.close()
def __get_edge_recorder_be__(self):
from audiotools.bitstream import BitstreamRecorder
return (BitstreamRecorder(0), tempfile.NamedTemporaryFile())
def __validate_edge_recorder_be__(self, writer, temp_file):
from audiotools.bitstream import BitstreamWriter
writer2 = BitstreamWriter(open(temp_file.name, "wb"), 0)
writer.copy(writer2)
writer2.close()
self.assertEqual(open(temp_file.name, "rb").read(),
"".join(map(chr,
[0, 0, 0, 0, 255, 255, 255, 255,
128, 0, 0, 0, 127, 255, 255, 255,
0, 0, 0, 0, 0, 0, 0, 0,
255, 255, 255, 255, 255, 255, 255, 255,
128, 0, 0, 0, 0, 0, 0, 0,
127, 255, 255, 255, 255, 255, 255, 255])))
temp_file.close()
def __get_edge_accumulator_be__(self):
from audiotools.bitstream import BitstreamAccumulator
return (BitstreamAccumulator(0), None)
def __validate_edge_accumulator_be__(self, writer, temp_file):
self.assertEqual(writer.bits(), 48 * 8)
def __get_edge_writer_le__(self):
from audiotools.bitstream import BitstreamWriter
temp_file = tempfile.NamedTemporaryFile()
return (BitstreamWriter(open(temp_file.name, "wb"), 1), temp_file)
def __validate_edge_writer_le__(self, writer, temp_file):
writer.close()
self.assertEqual(open(temp_file.name, "rb").read(),
"".join(map(chr,
[0, 0, 0, 0, 255, 255, 255, 255,
0, 0, 0, 128, 255, 255, 255, 127,
0, 0, 0, 0, 0, 0, 0, 0,
255, 255, 255, 255, 255, 255, 255, 255,
0, 0, 0, 0, 0, 0, 0, 128,
255, 255, 255, 255, 255, 255, 255, 127])))
temp_file.close()
def __get_edge_recorder_le__(self):
from audiotools.bitstream import BitstreamRecorder
return (BitstreamRecorder(1), tempfile.NamedTemporaryFile())
def __validate_edge_recorder_le__(self, writer, temp_file):
from audiotools.bitstream import BitstreamWriter
writer2 = BitstreamWriter(open(temp_file.name, "wb"), 1)
writer.copy(writer2)
writer2.close()
self.assertEqual(open(temp_file.name, "rb").read(),
"".join(map(chr,
[0, 0, 0, 0, 255, 255, 255, 255,
0, 0, 0, 128, 255, 255, 255, 127,
0, 0, 0, 0, 0, 0, 0, 0,
255, 255, 255, 255, 255, 255, 255, 255,
0, 0, 0, 0, 0, 0, 0, 128,
255, 255, 255, 255, 255, 255, 255, 127])))
temp_file.close()
def __get_edge_accumulator_le__(self):
from audiotools.bitstream import BitstreamAccumulator
return (BitstreamAccumulator(1), None)
def __validate_edge_accumulator_le__(self, writer, temp_file):
self.assertEqual(writer.bits(), 48 * 8)
    def __test_writer__(self, endianness):
        """run each __writer_perform_*__ check against every writer type

        endianness - 0 for big-endian, 1 for little-endian

        each check writes the canonical 4-byte pattern B1 ED 3B C1;
        it is verified through a file-backed BitstreamWriter, a
        cStringIO-backed writer, a BitstreamRecorder (copied to a file),
        and a BitstreamAccumulator (bit count only), plus recorder
        swap/reset and recorder->recorder / recorder->accumulator chains
        """
        from audiotools.bitstream import BitstreamWriter
        from audiotools.bitstream import BitstreamRecorder
        from audiotools.bitstream import BitstreamAccumulator

        checks = [self.__writer_perform_write__,
                  self.__writer_perform_write_signed__,
                  self.__writer_perform_write_64__,
                  self.__writer_perform_write_signed_64__,
                  self.__writer_perform_write_unary_0__,
                  self.__writer_perform_write_unary_1__]

        #perform file-based checks
        for check in checks:
            temp = tempfile.NamedTemporaryFile()
            try:
                writer = BitstreamWriter(open(temp.name, "wb"), endianness)
                check(writer, endianness)
                writer.close()
                self.__check_output_file__(temp)
            finally:
                temp.close()

            #deleting the writer flushes any buffered bits to the stream
            data = cStringIO.StringIO()
            writer = BitstreamWriter(data, endianness)
            check(writer, endianness)
            del(writer)
            self.assertEqual(data.getvalue(), "\xB1\xED\x3B\xC1")

        #perform recorder-based checks
        for check in checks:
            temp = tempfile.NamedTemporaryFile()
            try:
                writer = BitstreamWriter(open(temp.name, "wb"), endianness)
                recorder = BitstreamRecorder(endianness)
                check(recorder, endianness)
                recorder.copy(writer)
                writer.close()
                self.__check_output_file__(temp)
                self.assertEqual(recorder.bits(), 32)
            finally:
                temp.close()

        #perform accumulator-based checks
        for check in checks:
            writer = BitstreamAccumulator(endianness)
            check(writer, endianness)
            self.assertEqual(writer.bits(), 32)

        #check swap records
        #(the halves are written to the "wrong" recorders,
        # then swap() restores the correct overall order)
        temp = tempfile.NamedTemporaryFile()
        try:
            writer = BitstreamWriter(open(temp.name, "wb"), endianness)
            recorder1 = BitstreamRecorder(endianness)
            recorder2 = BitstreamRecorder(endianness)
            recorder2.write(8, 0xB1)
            recorder2.write(8, 0xED)
            recorder1.write(8, 0x3B)
            recorder1.write(8, 0xC1)
            recorder1.swap(recorder2)
            recorder1.copy(writer)
            recorder2.copy(writer)
            writer.close()
            self.__check_output_file__(temp)
        finally:
            temp.close()

        #check recorder reset
        #(the junk bytes written before reset() must not appear)
        temp = tempfile.NamedTemporaryFile()
        try:
            writer = BitstreamWriter(open(temp.name, "wb"), endianness)
            recorder = BitstreamRecorder(endianness)
            recorder.write(8, 0xAA)
            recorder.write(8, 0xBB)
            recorder.write(8, 0xCC)
            recorder.write(8, 0xDD)
            recorder.write(8, 0xEE)
            recorder.reset()
            recorder.write(8, 0xB1)
            recorder.write(8, 0xED)
            recorder.write(8, 0x3B)
            recorder.write(8, 0xC1)
            recorder.copy(writer)
            writer.close()
            self.__check_output_file__(temp)
        finally:
            temp.close()

        #check endianness setting
        #FIXME
        #check a file-based byte-align
        #FIXME
        #check a recorder-based byte-align
        #FIXME
        #check an accumulator-based byte-align
        #FIXME
        #check a partial dump
        #FIXME

        #check that recorder->recorder->file works
        for check in checks:
            temp = tempfile.NamedTemporaryFile()
            try:
                writer = BitstreamWriter(open(temp.name, "wb"), endianness)
                recorder1 = BitstreamRecorder(endianness)
                recorder2 = BitstreamRecorder(endianness)
                self.assertEqual(recorder1.bits(), 0)
                self.assertEqual(recorder2.bits(), 0)
                check(recorder2, endianness)
                self.assertEqual(recorder1.bits(), 0)
                self.assertEqual(recorder2.bits(), 32)
                recorder2.copy(recorder1)
                self.assertEqual(recorder1.bits(), 32)
                self.assertEqual(recorder2.bits(), 32)
                recorder1.copy(writer)
                writer.close()
                self.__check_output_file__(temp)
            finally:
                temp.close()

        #check that recorder->accumulator works
        for check in checks:
            recorder = BitstreamRecorder(endianness)
            accumulator = BitstreamAccumulator(endianness)
            self.assertEqual(recorder.bits(), 0)
            self.assertEqual(accumulator.bits(), 0)
            check(recorder, endianness)
            self.assertEqual(recorder.bits(), 32)
            self.assertEqual(accumulator.bits(), 0)
            recorder.copy(accumulator)
            self.assertEqual(recorder.bits(), 32)
            self.assertEqual(accumulator.bits(), 32)
def __writer_perform_write__(self, writer, endianness):
if (endianness == 0):
writer.write(2, 2)
writer.write(3, 6)
writer.write(5, 7)
writer.write(3, 5)
writer.write(19, 342977)
else:
writer.write(2, 1)
writer.write(3, 4)
writer.write(5, 13)
writer.write(3, 3)
writer.write(19, 395743)
def __writer_perform_write_signed__(self, writer, endianness):
if (endianness == 0):
writer.write_signed(2, -2)
writer.write_signed(3, -2)
writer.write_signed(5, 7)
writer.write_signed(3, -3)
writer.write_signed(19, -181311)
else:
writer.write_signed(2, 1)
writer.write_signed(3, -4)
writer.write_signed(5, 13)
writer.write_signed(3, 3)
writer.write_signed(19, -128545)
def __writer_perform_write_64__(self, writer, endianness):
if (endianness == 0):
writer.write64(2, 2)
writer.write64(3, 6)
writer.write64(5, 7)
writer.write64(3, 5)
writer.write64(19, 342977)
else:
writer.write64(2, 1)
writer.write64(3, 4)
writer.write64(5, 13)
writer.write64(3, 3)
writer.write64(19, 395743)
def __writer_perform_write_signed_64__(self, writer, endianness):
if (endianness == 0):
writer.write_signed64(2, -2)
writer.write_signed64(3, -2)
writer.write_signed64(5, 7)
writer.write_signed64(3, -3)
writer.write_signed64(19, -181311)
else:
writer.write_signed64(2, 1)
writer.write_signed64(3, -4)
writer.write_signed64(5, 13)
writer.write_signed64(3, 3)
writer.write_signed64(19, -128545)
def __writer_perform_write_unary_0__(self, writer, endianness):
if (endianness == 0):
writer.unary(0, 1)
writer.unary(0, 2)
writer.unary(0, 0)
writer.unary(0, 0)
writer.unary(0, 4)
writer.unary(0, 2)
writer.unary(0, 1)
writer.unary(0, 0)
writer.unary(0, 3)
writer.unary(0, 4)
writer.unary(0, 0)
writer.unary(0, 0)
writer.unary(0, 0)
writer.unary(0, 0)
writer.write(1, 1)
else:
writer.unary(0, 1)
writer.unary(0, 0)
writer.unary(0, 0)
writer.unary(0, 2)
writer.unary(0, 2)
writer.unary(0, 2)
writer.unary(0, 5)
writer.unary(0, 3)
writer.unary(0, 0)
writer.unary(0, 1)
writer.unary(0, 0)
writer.unary(0, 0)
writer.unary(0, 0)
writer.unary(0, 0)
writer.write(2, 3)
def __writer_perform_write_unary_1__(self, writer, endianness):
if (endianness == 0):
writer.unary(1, 0)
writer.unary(1, 1)
writer.unary(1, 0)
writer.unary(1, 3)
writer.unary(1, 0)
writer.unary(1, 0)
writer.unary(1, 0)
writer.unary(1, 1)
writer.unary(1, 0)
writer.unary(1, 1)
writer.unary(1, 2)
writer.unary(1, 0)
writer.unary(1, 0)
writer.unary(1, 1)
writer.unary(1, 0)
writer.unary(1, 0)
writer.unary(1, 0)
writer.unary(1, 5)
else:
writer.unary(1, 0)
writer.unary(1, 3)
writer.unary(1, 0)
writer.unary(1, 1)
writer.unary(1, 0)
writer.unary(1, 1)
writer.unary(1, 0)
writer.unary(1, 1)
writer.unary(1, 0)
writer.unary(1, 0)
writer.unary(1, 0)
writer.unary(1, 0)
writer.unary(1, 1)
writer.unary(1, 0)
writer.unary(1, 0)
writer.unary(1, 2)
writer.unary(1, 5)
writer.unary(1, 0)
def __check_output_file__(self, temp_file):
self.assertEqual(open(temp_file.name, "rb").read(), "\xB1\xED\x3B\xC1")
@LIB_BITSTREAM
def test_edge_cases(self):
from audiotools.bitstream import BitstreamReader
temp = tempfile.NamedTemporaryFile()
try:
#write the temp file with a set of known big-endian data
temp.write("".join(map(chr,
[0, 0, 0, 0, 255, 255, 255, 255,
128, 0, 0, 0, 127, 255, 255, 255,
0, 0, 0, 0, 0, 0, 0, 0,
255, 255, 255, 255, 255, 255, 255, 255,
128, 0, 0, 0, 0, 0, 0, 0,
127, 255, 255, 255, 255, 255, 255, 255])))
temp.flush()
#ensure a big-endian reader reads the values correctly
reader = BitstreamReader(open(temp.name, "rb"), 0)
self.__test_edge_reader_be__(reader)
del(reader)
#ensure a big-endian sub-reader reads the values correctly
reader = BitstreamReader(open(temp.name, "rb"), 0)
subreader = reader.substream(48)
self.__test_edge_reader_be__(subreader)
finally:
temp.close()
temp = tempfile.NamedTemporaryFile()
try:
#write the temp file with a collection of known little-endian data
temp.write("".join(map(chr,
[0, 0, 0, 0, 255, 255, 255, 255,
0, 0, 0, 128, 255, 255, 255, 127,
0, 0, 0, 0, 0, 0, 0, 0,
255, 255, 255, 255, 255, 255, 255, 255,
0, 0, 0, 0, 0, 0, 0, 128,
255, 255, 255, 255, 255, 255, 255, 127])))
temp.flush()
#ensure a little-endian reader reads the values correctly
reader = BitstreamReader(open(temp.name, "rb"), 1)
self.__test_edge_reader_le__(reader)
del(reader)
#ensure a little-endian sub-reader reads the values correctly
reader = BitstreamReader(open(temp.name, "rb"), 1)
subreader = reader.substream(48)
self.__test_edge_reader_be__(subreader)
finally:
temp.close()
#test a bunch of big-endian values via the bitstream writer
self.__test_edge_writer__(self.__get_edge_writer_be__,
self.__validate_edge_writer_be__)
#test a bunch of big-endian values via the bitstream recorder
self.__test_edge_writer__(self.__get_edge_recorder_be__,
self.__validate_edge_recorder_be__)
#test a bunch of big-endian values via the bitstream accumulator
self.__test_edge_writer__(self.__get_edge_accumulator_be__,
self.__validate_edge_accumulator_be__)
#test a bunch of little-endian values via the bitstream writer
self.__test_edge_writer__(self.__get_edge_writer_le__,
self.__validate_edge_writer_le__)
#test a bunch of little-endian values via the bitstream recorder
self.__test_edge_writer__(self.__get_edge_recorder_le__,
self.__validate_edge_recorder_le__)
#test a bunch of little-endian values via the bitstream accumulator
self.__test_edge_writer__(self.__get_edge_accumulator_le__,
self.__validate_edge_accumulator_le__)
@LIB_BITSTREAM
def test_python_reader(self):
from audiotools.bitstream import BitstreamReader
#Vanilla, file-based BitstreamReader uses a 1 character buffer
#and relies on stdio to perform buffering which is fast enough.
#Therefore, a byte-aligned file can be seek()ed at will.
#However, making lots of read(1) calls on a Python object
#is unacceptably slow.
#Therefore, we read a 4KB string and pull individual bytes from
#it as needed, which should keep performance reasonable.
def new_temp1():
temp = cStringIO.StringIO()
temp.write(chr(0xB1))
temp.write(chr(0xED))
temp.write(chr(0x3B))
temp.write(chr(0xC1))
temp.seek(0, 0)
return temp
def new_temp2():
return __SimpleChunkReader__([chr(0xB1) +
chr(0xED) +
chr(0x3B) +
chr(0xC1)])
def new_temp3():
return __SimpleChunkReader__([chr(0xB1) +
chr(0xED),
chr(0x3B) +
chr(0xC1)])
def new_temp4():
return __SimpleChunkReader__([chr(0xB1),
chr(0xED),
chr(0x3B) +
chr(0xC1)])
def new_temp5():
return __SimpleChunkReader__([chr(0xB1),
chr(0xED),
chr(0x3B),
chr(0xC1)])
for new_temp in [new_temp1, new_temp2, new_temp3, new_temp4,
new_temp5]:
#first, check the bitstream reader
#against some simple known big-endian values
bitstream = BitstreamReader(new_temp(), 0)
self.assertEqual(bitstream.read(2), 2)
self.assertEqual(bitstream.read(3), 6)
self.assertEqual(bitstream.read(5), 7)
self.assertEqual(bitstream.read(3), 5)
self.assertEqual(bitstream.read(19), 342977)
bitstream = BitstreamReader(new_temp(), 0)
self.assertEqual(bitstream.read64(2), 2)
self.assertEqual(bitstream.read64(3), 6)
self.assertEqual(bitstream.read64(5), 7)
self.assertEqual(bitstream.read64(3), 5)
self.assertEqual(bitstream.read64(19), 342977)
bitstream = BitstreamReader(new_temp(), 0)
self.assertEqual(bitstream.read_signed(2), -2)
self.assertEqual(bitstream.read_signed(3), -2)
self.assertEqual(bitstream.read_signed(5), 7)
self.assertEqual(bitstream.read_signed(3), -3)
self.assertEqual(bitstream.read_signed(19), -181311)
bitstream = BitstreamReader(new_temp(), 0)
self.assertEqual(bitstream.unary(0), 1)
self.assertEqual(bitstream.unary(0), 2)
self.assertEqual(bitstream.unary(0), 0)
self.assertEqual(bitstream.unary(0), 0)
self.assertEqual(bitstream.unary(0), 4)
bitstream.byte_align()
bitstream = BitstreamReader(new_temp(), 0)
self.assertEqual(bitstream.unary(1), 0)
self.assertEqual(bitstream.unary(1), 1)
self.assertEqual(bitstream.unary(1), 0)
self.assertEqual(bitstream.unary(1), 3)
self.assertEqual(bitstream.unary(1), 0)
bitstream.byte_align()
bitstream = BitstreamReader(new_temp(), 0)
self.assertEqual(bitstream.read(1), 1)
bit = bitstream.read(1)
self.assertEqual(bit, 0)
bitstream.unread(bit)
self.assertEqual(bitstream.read(2), 1)
bitstream.byte_align()
bitstream = BitstreamReader(new_temp(), 0)
self.assertEqual(bitstream.read(8), 0xB1)
bitstream.unread(0)
self.assertEqual(bitstream.read(1), 0)
bitstream.unread(1)
self.assertEqual(bitstream.read(1), 1)
bitstream = BitstreamReader(new_temp(), 0)
self.assertEqual(bitstream.limited_unary(0, 2), 1)
self.assertEqual(bitstream.limited_unary(0, 2), None)
bitstream.byte_align()
bitstream = BitstreamReader(new_temp(), 0)
self.assertEqual(bitstream.limited_unary(1, 2), 0)
self.assertEqual(bitstream.limited_unary(1, 2), 1)
self.assertEqual(bitstream.limited_unary(1, 2), 0)
self.assertEqual(bitstream.limited_unary(1, 2), None)
bitstream = BitstreamReader(new_temp(), 0)
bitstream.mark()
self.assertEqual(bitstream.read(4), 0xB)
bitstream.rewind()
self.assertEqual(bitstream.read(8), 0xB1)
bitstream.rewind()
self.assertEqual(bitstream.read(12), 0xB1E)
bitstream.unmark()
bitstream.mark()
self.assertEqual(bitstream.read(4), 0xD)
bitstream.rewind()
self.assertEqual(bitstream.read(8), 0xD3)
bitstream.rewind()
self.assertEqual(bitstream.read(12), 0xD3B)
bitstream.unmark()
del(bitstream)
bitstream = BitstreamReader(new_temp(), 0)
#then, check the bitstream reader
#against some simple known little-endian values
bitstream = BitstreamReader(new_temp(), 1)
self.assertEqual(bitstream.read(2), 1)
self.assertEqual(bitstream.read(3), 4)
self.assertEqual(bitstream.read(5), 13)
self.assertEqual(bitstream.read(3), 3)
self.assertEqual(bitstream.read(19), 395743)
bitstream = BitstreamReader(new_temp(), 1)
self.assertEqual(bitstream.read64(2), 1)
self.assertEqual(bitstream.read64(3), 4)
self.assertEqual(bitstream.read64(5), 13)
self.assertEqual(bitstream.read64(3), 3)
self.assertEqual(bitstream.read64(19), 395743)
bitstream = BitstreamReader(new_temp(), 1)
self.assertEqual(bitstream.read_signed(2), 1)
self.assertEqual(bitstream.read_signed(3), -4)
self.assertEqual(bitstream.read_signed(5), 13)
self.assertEqual(bitstream.read_signed(3), 3)
self.assertEqual(bitstream.read_signed(19), -128545)
bitstream = BitstreamReader(new_temp(), 1)
self.assertEqual(bitstream.unary(0), 1)
self.assertEqual(bitstream.unary(0), 0)
self.assertEqual(bitstream.unary(0), 0)
self.assertEqual(bitstream.unary(0), 2)
self.assertEqual(bitstream.unary(0), 2)
bitstream.byte_align()
bitstream = BitstreamReader(new_temp(), 1)
self.assertEqual(bitstream.unary(1), 0)
self.assertEqual(bitstream.unary(1), 3)
self.assertEqual(bitstream.unary(1), 0)
self.assertEqual(bitstream.unary(1), 1)
self.assertEqual(bitstream.unary(1), 0)
bitstream.byte_align()
bitstream = BitstreamReader(new_temp(), 1)
self.assertEqual(bitstream.read(1), 1)
bit = bitstream.read(1)
self.assertEqual(bit, 0)
bitstream.unread(bit)
self.assertEqual(bitstream.read(4), 8)
bitstream.byte_align()
bitstream = BitstreamReader(new_temp(), 1)
self.assertEqual(bitstream.read(8), 0xB1)
bitstream.unread(0)
self.assertEqual(bitstream.read(1), 0)
bitstream.unread(1)
self.assertEqual(bitstream.read(1), 1)
bitstream = BitstreamReader(new_temp(), 1)
self.assertEqual(bitstream.limited_unary(0, 2), 1)
self.assertEqual(bitstream.limited_unary(0, 2), 0)
self.assertEqual(bitstream.limited_unary(0, 2), 0)
self.assertEqual(bitstream.limited_unary(0, 2), None)
bitstream.byte_align()
bitstream = BitstreamReader(new_temp(), 1)
self.assertEqual(bitstream.limited_unary(1, 2), 0)
self.assertEqual(bitstream.limited_unary(1, 2), None)
bitstream = BitstreamReader(new_temp(), 1)
bitstream.mark()
self.assertEqual(bitstream.read(4), 0x1)
bitstream.rewind()
self.assertEqual(bitstream.read(8), 0xB1)
bitstream.rewind()
self.assertEqual(bitstream.read(12), 0xDB1)
bitstream.unmark()
bitstream.mark()
self.assertEqual(bitstream.read(4), 0xE)
bitstream.rewind()
self.assertEqual(bitstream.read(8), 0xBE)
bitstream.rewind()
self.assertEqual(bitstream.read(12), 0x3BE)
bitstream.unmark()
    @LIB_BITSTREAM
    def test_simple_writer(self):
        """test BitstreamWriter against known big and little-endian output

        each write method (write, write64, write_signed, unary 0/1)
        should produce the same canonical bytes B1 ED 3B C1;
        a final check verifies byte_align() zero-pads partial bytes
        """
        from audiotools.bitstream import BitstreamWriter

        temp = tempfile.NamedTemporaryFile()
        try:
            #first, have the bitstream writer generate
            #a set of known big-endian values

            f = open(temp.name, "wb")
            bitstream = BitstreamWriter(f, 0)
            bitstream.write(2, 2)
            bitstream.write(3, 6)
            bitstream.write(5, 7)
            bitstream.write(3, 5)
            bitstream.write(19, 342977)
            f.close()
            del(bitstream)
            self.assertEqual(map(ord, open(temp.name, "rb").read()),
                             [0xB1, 0xED, 0x3B, 0xC1])

            f = open(temp.name, "wb")
            bitstream = BitstreamWriter(f, 0)
            bitstream.write64(2, 2)
            bitstream.write64(3, 6)
            bitstream.write64(5, 7)
            bitstream.write64(3, 5)
            bitstream.write64(19, 342977)
            f.close()
            del(bitstream)
            self.assertEqual(map(ord, open(temp.name, "rb").read()),
                             [0xB1, 0xED, 0x3B, 0xC1])

            f = open(temp.name, "wb")
            bitstream = BitstreamWriter(f, 0)
            bitstream.write_signed(2, -2)
            bitstream.write_signed(3, -2)
            bitstream.write_signed(5, 7)
            bitstream.write_signed(3, -3)
            bitstream.write_signed(19, -181311)
            f.close()
            del(bitstream)
            self.assertEqual(map(ord, open(temp.name, "rb").read()),
                             [0xB1, 0xED, 0x3B, 0xC1])

            f = open(temp.name, "wb")
            bitstream = BitstreamWriter(f, 0)
            bitstream.unary(0, 1)
            bitstream.unary(0, 2)
            bitstream.unary(0, 0)
            bitstream.unary(0, 0)
            bitstream.unary(0, 4)
            bitstream.unary(0, 2)
            bitstream.unary(0, 1)
            bitstream.unary(0, 0)
            bitstream.unary(0, 3)
            bitstream.unary(0, 4)
            bitstream.unary(0, 0)
            bitstream.unary(0, 0)
            bitstream.unary(0, 0)
            bitstream.unary(0, 0)
            bitstream.write(1, 1)
            f.close()
            del(bitstream)
            self.assertEqual(map(ord, open(temp.name, "rb").read()),
                             [0xB1, 0xED, 0x3B, 0xC1])

            f = open(temp.name, "wb")
            bitstream = BitstreamWriter(f, 0)
            bitstream.unary(1, 0)
            bitstream.unary(1, 1)
            bitstream.unary(1, 0)
            bitstream.unary(1, 3)
            bitstream.unary(1, 0)
            bitstream.unary(1, 0)
            bitstream.unary(1, 0)
            bitstream.unary(1, 1)
            bitstream.unary(1, 0)
            bitstream.unary(1, 1)
            bitstream.unary(1, 2)
            bitstream.unary(1, 0)
            bitstream.unary(1, 0)
            bitstream.unary(1, 1)
            bitstream.unary(1, 0)
            bitstream.unary(1, 0)
            bitstream.unary(1, 0)
            bitstream.unary(1, 5)
            f.close()
            del(bitstream)
            self.assertEqual(map(ord, open(temp.name, "rb").read()),
                             [0xB1, 0xED, 0x3B, 0xC1])

            #then, have the bitstream writer generate
            #a set of known little-endian values

            f = open(temp.name, "wb")
            bitstream = BitstreamWriter(f, 1)
            bitstream.write(2, 1)
            bitstream.write(3, 4)
            bitstream.write(5, 13)
            bitstream.write(3, 3)
            bitstream.write(19, 395743)
            f.close()
            del(bitstream)
            self.assertEqual(map(ord, open(temp.name, "rb").read()),
                             [0xB1, 0xED, 0x3B, 0xC1])

            f = open(temp.name, "wb")
            bitstream = BitstreamWriter(f, 1)
            bitstream.write64(2, 1)
            bitstream.write64(3, 4)
            bitstream.write64(5, 13)
            bitstream.write64(3, 3)
            bitstream.write64(19, 395743)
            f.close()
            del(bitstream)
            self.assertEqual(map(ord, open(temp.name, "rb").read()),
                             [0xB1, 0xED, 0x3B, 0xC1])

            f = open(temp.name, "wb")
            bitstream = BitstreamWriter(f, 1)
            bitstream.write_signed(2, 1)
            bitstream.write_signed(3, -4)
            bitstream.write_signed(5, 13)
            bitstream.write_signed(3, 3)
            bitstream.write_signed(19, -128545)
            f.close()
            del(bitstream)
            self.assertEqual(map(ord, open(temp.name, "rb").read()),
                             [0xB1, 0xED, 0x3B, 0xC1])

            f = open(temp.name, "wb")
            bitstream = BitstreamWriter(f, 1)
            bitstream.unary(0, 1)
            bitstream.unary(0, 0)
            bitstream.unary(0, 0)
            bitstream.unary(0, 2)
            bitstream.unary(0, 2)
            bitstream.unary(0, 2)
            bitstream.unary(0, 5)
            bitstream.unary(0, 3)
            bitstream.unary(0, 0)
            bitstream.unary(0, 1)
            bitstream.unary(0, 0)
            bitstream.unary(0, 0)
            bitstream.unary(0, 0)
            bitstream.unary(0, 0)
            bitstream.write(2, 3)
            f.close()
            del(bitstream)
            self.assertEqual(map(ord, open(temp.name, "rb").read()),
                             [0xB1, 0xED, 0x3B, 0xC1])

            f = open(temp.name, "wb")
            bitstream = BitstreamWriter(f, 1)
            bitstream.unary(1, 0)
            bitstream.unary(1, 3)
            bitstream.unary(1, 0)
            bitstream.unary(1, 1)
            bitstream.unary(1, 0)
            bitstream.unary(1, 1)
            bitstream.unary(1, 0)
            bitstream.unary(1, 1)
            bitstream.unary(1, 0)
            bitstream.unary(1, 0)
            bitstream.unary(1, 0)
            bitstream.unary(1, 0)
            bitstream.unary(1, 1)
            bitstream.unary(1, 0)
            bitstream.unary(1, 0)
            bitstream.unary(1, 2)
            bitstream.unary(1, 5)
            bitstream.unary(1, 0)
            f.close()
            del(bitstream)
            self.assertEqual(map(ord, open(temp.name, "rb").read()),
                             [0xB1, 0xED, 0x3B, 0xC1])

            #byte_align() should pad each 4-bit write into its own byte
            f = open(temp.name, "wb")
            bitstream = BitstreamWriter(f, 1)
            bitstream.write(4, 0x1)
            bitstream.byte_align()
            bitstream.write(4, 0xD)
            bitstream.byte_align()
            f.close()
            del(bitstream)
            self.assertEqual(map(ord, open(temp.name, "rb").read()),
                             [0x01, 0x0D])
        finally:
            temp.close()

        #and have the bitstream reader check those values are accurate
    @LIB_BITSTREAM
    def test_reader_close(self):
        """closed BitstreamReaders should raise IOError on any read

        checked against both a real file object and a cStringIO object,
        in both endiannesses, and again after set_endianness()
        (which must not resurrect a closed stream)
        """
        from audiotools.bitstream import BitstreamReader, HuffmanTree

        def test_reader(reader):
            #every read method should fail once the reader is closed
            self.assertRaises(IOError, reader.read, 1)
            self.assertRaises(IOError, reader.read64, 2)
            self.assertRaises(IOError, reader.skip, 3)
            self.assertRaises(IOError, reader.skip_bytes, 1)
            self.assertRaises(IOError, reader.read_signed, 2)
            self.assertRaises(IOError, reader.read_signed64, 3)
            self.assertRaises(IOError, reader.unary, 1)
            self.assertRaises(IOError, reader.limited_unary, 1, 2)
            self.assertRaises(IOError, reader.read_bytes, 1)
            self.assertRaises(IOError, reader.parse, "1b2b3b")
            self.assertRaises(IOError, reader.substream, 2)
            self.assertRaises(IOError, reader.read_huffman_code,
                              HuffmanTree([(1, ), 1,
                                           (0, 1), 2,
                                           (0, 0, 1), 3,
                                           (0, 0, 0), 4], False))

        def new_temp():
            temp = cStringIO.StringIO()
            temp.write(chr(0xB1))
            temp.write(chr(0xED))
            temp.write(chr(0x3B))
            temp.write(chr(0xC1))
            temp.seek(0, 0)
            return temp

        #test a BitstreamReader from a Python file object
        #NOTE(review): assumes "test_core.py" exists in the current
        #working directory - the file contents are never read
        f = open("test_core.py", "rb")
        reader = BitstreamReader(f, 0)
        reader.close()
        test_reader(reader)
        reader.set_endianness(1)
        test_reader(reader)

        reader = BitstreamReader(f, 1)
        reader.close()
        test_reader(reader)
        reader.set_endianness(0)
        test_reader(reader)

        f.close()
        del(f)

        #test a BitstreamReader from a Python cStringIO object
        reader = BitstreamReader(new_temp(), 0)
        reader.close()
        test_reader(reader)
        reader.set_endianness(1)
        test_reader(reader)

        reader = BitstreamReader(new_temp(), 1)
        reader.close()
        test_reader(reader)
        reader.set_endianness(0)
        test_reader(reader)
    @LIB_BITSTREAM
    def test_writer_close(self):
        """closed writers should raise IOError on any write

        checked against BitstreamWriter (file and cStringIO backed),
        BitstreamRecorder and BitstreamAccumulator, in both endiannesses,
        and again after set_endianness()
        """
        from audiotools.bitstream import BitstreamWriter
        from audiotools.bitstream import BitstreamRecorder
        from audiotools.bitstream import BitstreamAccumulator

        def test_writer(writer):
            #every write method should fail once the writer is closed
            self.assertRaises(IOError, writer.write, 1, 1)
            self.assertRaises(IOError, writer.write_signed, 2, 1)
            self.assertRaises(IOError, writer.unary, 1, 1)
            self.assertRaises(IOError, writer.write64, 1, 1)
            self.assertRaises(IOError, writer.write_signed64, 2, 1)
            self.assertRaises(IOError, writer.write_bytes, "foo")
            self.assertRaises(IOError, writer.build, "1u2u3u", [0, 1, 2])

        #test a BitstreamWriter to a Python file object
        f = open("test.bin", "wb")
        try:
            writer = BitstreamWriter(f, 0)
            writer.close()
            test_writer(writer)
            writer.set_endianness(1)
            test_writer(writer)
            f.close()
            del(f)
        finally:
            os.unlink("test.bin")

        f = open("test.bin", "wb")
        try:
            writer = BitstreamWriter(f, 1)
            writer.close()
            test_writer(writer)
            writer.set_endianness(0)
            test_writer(writer)
            f.close()
            del(f)
        finally:
            os.unlink("test.bin")

        #test a BitstreamWriter to a Python cStringIO object
        s = cStringIO.StringIO()
        writer = BitstreamWriter(s, 0)
        writer.close()
        test_writer(writer)
        writer.set_endianness(1)
        test_writer(writer)
        del(writer)
        del(s)

        s = cStringIO.StringIO()
        writer = BitstreamWriter(s, 1)
        writer.close()
        test_writer(writer)
        writer.set_endianness(0)
        test_writer(writer)
        del(writer)
        del(s)

        #test a BitstreamRecorder
        writer = BitstreamRecorder(0)
        writer.close()
        test_writer(writer)
        writer.set_endianness(1)
        test_writer(writer)
        del(writer)

        writer = BitstreamRecorder(1)
        writer.close()
        test_writer(writer)
        writer.set_endianness(0)
        test_writer(writer)
        del(writer)

        #test a BitstreamAccumulator
        writer = BitstreamAccumulator(0)
        writer.close()
        test_writer(writer)
        writer.set_endianness(1)
        test_writer(writer)
        del(writer)

        writer = BitstreamAccumulator(1)
        writer.close()
        test_writer(writer)
        writer.set_endianness(0)
        test_writer(writer)
        del(writer)
class TestReplayGain(unittest.TestCase):
    @LIB_REPLAYGAIN
    def test_basics(self):
        """sanity checks for the ReplayGain calculator

        verifies bad sample rates are rejected, an empty stream yields
        a (0.0, 0.0) title gain, album_gain with no data raises
        ValueError, and calculate_replay_gain() pairs each track with
        its own gain entry in order
        """
        import audiotools.replaygain
        import audiotools.pcm
        from cStringIO import StringIO

        #check for invalid sample rate
        self.assertRaises(ValueError,
                          audiotools.replaygain.ReplayGain,
                          200000)

        #check for a very small sample count
        rg = audiotools.replaygain.ReplayGain(44100)
        self.assertEqual(
            rg.title_gain(audiotools.PCMReader(StringIO(""),
                                               44100, 2, 0x3, 16)),
            (0.0, 0.0))
        self.assertRaises(ValueError, rg.album_gain)

        #check for no tracks
        assert(len(list(audiotools.calculate_replay_gain([]))) == 0)

        #check for lots of invalid combinations for calculate_replay_gain
        track_file1 = tempfile.NamedTemporaryFile(suffix=".wav")
        track_file2 = tempfile.NamedTemporaryFile(suffix=".wav")
        track_file3 = tempfile.NamedTemporaryFile(suffix=".wav")
        try:
            track1 = audiotools.WaveAudio.from_pcm(track_file1.name,
                                                   BLANK_PCM_Reader(2))
            track2 = audiotools.WaveAudio.from_pcm(track_file2.name,
                                                   BLANK_PCM_Reader(3))
            track3 = audiotools.WaveAudio.from_pcm(track_file3.name,
                                                   BLANK_PCM_Reader(2))
            gain = list(audiotools.calculate_replay_gain(
                [track1, track2, track3]))
            self.assertEqual(len(gain), 3)
            #each entry's first element is the track object itself
            self.assert_(gain[0][0] is track1)
            self.assert_(gain[1][0] is track2)
            self.assert_(gain[2][0] is track3)
        finally:
            track_file1.close()
            track_file2.close()
            track_file3.close()
@LIB_REPLAYGAIN
def test_valid_rates(self):
import audiotools.replaygain
for sample_rate in [8000, 11025, 12000, 16000, 18900, 22050, 24000,
32000, 37800, 44100, 48000, 56000, 64000, 88200,
96000, 112000, 128000, 144000, 176400, 192000]:
gain = audiotools.replaygain.ReplayGain(sample_rate)
reader = test_streams.Simple_Sine(sample_rate * 2,
sample_rate,
0x4,
16,
(30000, sample_rate / 100))
(gain, peak) = gain.title_gain(reader)
self.assert_(gain < -4.0)
self.assert_(peak > .90)
@LIB_REPLAYGAIN
def test_pcm(self):
import audiotools.replaygain
gain = audiotools.replaygain.ReplayGain(44100)
(gain, peak) = gain.title_gain(
test_streams.Sine16_Stereo(44100, 44100,
441.0, 0.50,
4410.0, 0.49, 1.0))
main_reader = test_streams.Sine16_Stereo(44100, 44100,
441.0, 0.50,
4410.0, 0.49, 1.0)
reader = audiotools.replaygain.ReplayGainReader(main_reader,
gain,
peak)
#read FrameLists from ReplayGainReader
f = reader.read(4096)
while (len(f) > 0):
f = reader.read(4096)
#ensure subsequent reads return empty FrameLists
for i in xrange(10):
self.assertEqual(len(reader.read(4096)), 0)
#ensure closing the ReplayGainReader raises ValueError
#on subsequent reads
reader.close()
self.assertRaises(ValueError, reader.read, 4096)
#ensure wrapped reader is also closed
self.assertRaises(ValueError, main_reader.read, 4096)
@LIB_REPLAYGAIN
def test_reader(self):
import audiotools.replaygain
test_format = audiotools.WaveAudio
dummy1 = tempfile.NamedTemporaryFile(suffix="." + test_format.SUFFIX)
dummy2 = tempfile.NamedTemporaryFile(suffix="." + test_format.SUFFIX)
try:
#build dummy file
track1 = test_format.from_pcm(
dummy1.name,
test_streams.Sine16_Stereo(44100, 44100,
441.0, 0.50,
4410.0, 0.49, 1.0))
#calculate its ReplayGain
gain = audiotools.replaygain.ReplayGain(track1.sample_rate())
(gain, peak) = gain.title_gain(track1.to_pcm())
#apply gain to dummy file
track2 = test_format.from_pcm(
dummy2.name,
audiotools.replaygain.ReplayGainReader(track1.to_pcm(),
gain,
peak))
#ensure gain applied is quieter than without gain applied
gain2 = audiotools.replaygain.ReplayGain(track1.sample_rate())
(gain2, peak2) = gain2.title_gain(track2.to_pcm())
self.assert_(gain2 > gain)
finally:
dummy1.close()
dummy2.close()
class testcuesheet(unittest.TestCase):
    """Tests reading and converting .cue cuesheet files.

    Subclasses override setUp() (and, for FLAC, sheets()) so the same
    assertions run against .toc files and embedded FLAC CUESHEET blocks."""

    @LIB_CORE
    def setUp(self):
        import audiotools.cue

        self.sheet_class = audiotools.cue.Cuesheet
        # a zlib-compressed, base64-encoded .cue file describing a
        # 22 track CD-DA disc (Python 2 str.decode codec chain)
        self.test_sheets = [
            """eJydlt1q20AQRu8NfofFDxB2Zv/nTshyUBvHQVHa3rppKCbFDqmbtG/f3VqQzZjtxYKvPiOdz6Od
wU9SPN5+Pcfb7tDe3j6I17kRQhPX/ViPmubsbnaXMYL
""".replace("PLACEHOLDER", "") if False else """eJydlt1q20AQRu8NfofFDxB2Zv/nTshyUBvHQVHa3rppKCbFDqmbtG/f3VqQzZjtxYKvPiOdz6Od
wU9SvN0u8sLnaXMYL""".decode('base64').decode('zlib')]
        self.suffix = '.cue'

    def sheets(self):
        """Yields each test sheet parsed back via audiotools.read_sheet()
        from a temporary file with the proper suffix."""
        for test_sheet in self.test_sheets:
            tempsheetfile = tempfile.NamedTemporaryFile(suffix=self.suffix)
            try:
                tempsheetfile.write(test_sheet)
                tempsheetfile.flush()
                sheet = audiotools.read_sheet(tempsheetfile.name)
            finally:
                tempsheetfile.close()
            yield sheet

    @LIB_CORE
    def testreadsheet(self):
        """Checks the parsed sheet's type, catalog number, per-track ISRCs,
        index points and PCM lengths against known-good values."""
        for sheet in self.sheets():
            self.assertEqual(isinstance(sheet, self.sheet_class), True)
            self.assertEqual(sheet.catalog(), '4580226563955')
            self.assertEqual(sorted(sheet.ISRCs().items()),
                             [(1, 'JPG750800183'),
                              (2, 'JPG750800212'),
                              (3, 'JPG750800214'),
                              (4, 'JPG750800704'),
                              (5, 'JPG750800705'),
                              (6, 'JPG750800706'),
                              (7, 'JPG750800707'),
                              (8, 'JPG750800708'),
                              (9, 'JPG750800219'),
                              (10, 'JPG750800722'),
                              (11, 'JPG750800709'),
                              (12, 'JPG750800290'),
                              (13, 'JPG750800218'),
                              (14, 'JPG750800710'),
                              (15, 'JPG750800217'),
                              (16, 'JPG750800531'),
                              (17, 'JPG750800225'),
                              (18, 'JPG750800711'),
                              (19, 'JPG750800180'),
                              (20, 'JPG750800712'),
                              (21, 'JPG750800713'),
                              (22, 'JPG750800714')])
            # each tuple is a track's index points, in CD frames
            self.assertEqual(list(sheet.indexes()),
                             [(0, ), (20885, ), (42189, 42411), (49242, 49473),
                              (52754, ), (69656, ), (95428, ), (118271, 118430),
                              (136968, ), (138433, 138567), (156412, ),
                              (168864, ), (187716, ), (192245, 192373),
                              (200347, ), (204985, ), (227336, ),
                              (243382, 243549), (265893, 266032),
                              (292606, 292942), (302893, 303123), (321611, )])
            # per-track lengths in PCM frames for a 191795016 frame disc
            self.assertEqual(list(sheet.pcm_lengths(191795016,
                                                    44100)),
                             [12280380, 12657288, 4152456, 1929228,
                              9938376, 15153936, 13525176, 10900344,
                              940212, 10492860, 7321776, 11084976,
                              2738316, 4688712, 2727144, 13142388,
                              9533244, 13220004, 15823080, 5986428,
                              10870944, 2687748])

    @LIB_CORE
    def testconvertsheet(self):
        """Round-trips the sheet through .cue, .toc and embedded cuesheet
        representations and checks that all data survives."""
        import audiotools.cue
        import audiotools.toc

        for sheet in self.sheets():
            #convert to CUE and test for equality
            temp_cue_file = tempfile.NamedTemporaryFile(suffix='.cue')
            try:
                temp_cue_file.write(audiotools.cue.Cuesheet.file(
                    sheet, os.path.basename(temp_cue_file.name)))
                temp_cue_file.flush()

                cue_sheet = audiotools.read_sheet(temp_cue_file.name)

                self.assertEqual(sheet.catalog(), cue_sheet.catalog())
                self.assertEqual(list(sheet.indexes()),
                                 list(cue_sheet.indexes()))
                self.assertEqual(list(sheet.pcm_lengths(191795016,
                                                        44100)),
                                 list(cue_sheet.pcm_lengths(191795016,
                                                            44100)))
                self.assertEqual(sorted(sheet.ISRCs().items()),
                                 sorted(cue_sheet.ISRCs().items()))
            finally:
                temp_cue_file.close()

            #convert to TOC and test for equality
            temp_toc_file = tempfile.NamedTemporaryFile(suffix='.toc')
            try:
                temp_toc_file.write(audiotools.toc.TOCFile.file(
                    sheet, os.path.basename(temp_toc_file.name)))
                temp_toc_file.flush()

                toc_sheet = audiotools.read_sheet(temp_toc_file.name)

                self.assertEqual(sheet.catalog(), toc_sheet.catalog())
                self.assertEqual(list(sheet.indexes()),
                                 list(toc_sheet.indexes()))
                self.assertEqual(list(sheet.pcm_lengths(191795016,
                                                        44100)),
                                 list(toc_sheet.pcm_lengths(191795016,
                                                            44100)))
                self.assertEqual(sorted(sheet.ISRCs().items()),
                                 sorted(toc_sheet.ISRCs().items()))
            finally:
                temp_toc_file.close()

            #convert to embedded cuesheets and test for equality
            for audio_class in [audiotools.FlacAudio,
                                audiotools.OggFlacAudio,
                                audiotools.WavPackAudio]:
                temp_file = tempfile.NamedTemporaryFile(
                    suffix="." + audio_class.SUFFIX)
                try:
                    f = audio_class.from_pcm(
                        temp_file.name,
                        EXACT_BLANK_PCM_Reader(191795016))
                    f.set_cuesheet(sheet)
                    f_sheet = audiotools.open(temp_file.name).get_cuesheet()
                    self.assertNotEqual(f_sheet, None)

                    self.assertEqual(sheet.catalog(), f_sheet.catalog())
                    self.assertEqual(list(sheet.indexes()),
                                     list(f_sheet.indexes()))
                    self.assertEqual(list(sheet.pcm_lengths(191795016,
                                                            44100)),
                                     list(f_sheet.pcm_lengths(191795016,
                                                              44100)))
                    self.assertEqual(sorted(sheet.ISRCs().items()),
                                     sorted(f_sheet.ISRCs().items()))
                finally:
                    temp_file.close()
class testtocsheet(testcuesheet):
    """Runs the cuesheet assertions against .toc (cdrdao) files.

    Only setUp() differs from the parent: the sheet class, the test data
    and the temp-file suffix.  NOTE(review): the parent's testreadsheet()
    expectations are assumed to match these TOC blobs as well."""

    @LIB_CORE
    def setUp(self):
        import audiotools.toc

        self.sheet_class = audiotools.toc.TOCFile
        # two zlib-compressed, base64-encoded .toc files for the same disc
        self.test_sheets = [
            """eJytlr1uG0EMhPt7isU9QExyf4/d4aTYShRJkM4IUglC0qULguT1Q15c7MJbspIhGPhmR8Mhl919
Nw/DMq/z8fzsxhALEKWY/BTjOAxPT2799fj+0+GwXufls5tfd4fzcDq75Xz5pp+X6/6+/3J5mW+H
27B+Pd+Xl/l02h/v///zcLsubvx0ec4RCgAWPw4fD8e9G388fj8+/H38GR04COwL+zhURLIhElKH
+MbTP0JgCDXYW4FDBzwxEfvJAbIXsB9u63xdHQADcaZaR7DRkaGjA4Fj4kAKDokTVTo8Q6p1RCsd
saMDOXgm8cNziEy5BicrcOqABVbEAwdRFYQGnK3A+T2YkJGErWBNn6/BxQpcOuDEmDijZn6LYRM9
mGodk9UITO91eGCVUhSMEweowQhGDlBn6oUsGYtFwxYnjqFyAOWbRohR4WXoWRBUiM9OjJfpg2bs
0ar4JuiQM3vc+iewzF47b2jWfJ34BZl04pS0+cRwat226jrsvFmw2jGg5FDU7eZnbwYQjcqOsDP6
smnEfKLNAuTUko3aLnrskCVtnnFbtFEswIZsVHdEnYKPoG9G1JkTCbklW/Uddt4cpeakYvNWtFI1
2PQdtsk3KjwsnfxJ1YgE3Bpfli76Jn+puT3Iqv96V0+KCvTotvfLGqqESDSbpU9W/Yedgy8JvMhQ
vq2i4Nvroz0Djeow986xjHoFaDq3UtJ0/gOiA7rW""".decode('base64').decode('zlib'),
            """eJytl+tq20AQhX9bT7HoAeKd2Zs0lFLhOMZtbigK9F9wHJGGNHZxlKal+N07uzGkcaDSwhpjzK7Q
fjrMnDMaj8WsXbWbRdfeiOvfYvnUYrdeCnmA2Xgs6vbHetOJ66fbR9GtxYebdvOw6B6X3z7dPvw6
uGk/ZpOqqY7PZiLXppCI1lhVGpNnk8Orw8r/NtOvjfiTCf4cV6ezy2o2vTqpznlpJAWJ6WnY2r65
QEi/3cyb46nIL1f3q/XzSjR33fc2z0bn0/rorD6Z1q9b1aa7e+zy3Z22WdbU1eSLqC4P52dhcX5R
T0T++XzmjCykhEK9XPzKN3p7tt/cnd9sFst7CfnL4n9OH23/eZRw9tHc36BerG7bg+fFz1xISeEr
pCZVkDK9qAgYi4ppUHeE/o/WJPUAVB2LqtKgloRIqhQSSDGqCtdeNFXdBMWRHPbSOxlNr5PQgyRj
SaNH1ZYs7tErknYAvYmlN2nogbQiZO0VaUPoBqDaWFSbBpXxCtZaSOOZ9RBUF4vqkqAiECDTelTf
f2oAahGLWqRBtQSWHHifCI34rvlkOcA6ylj6Mgm9kuQfoPCoUJKW/UJjrCGDTIXKDWYK32mmJKP3
hAZeHVAmsUJDmuRjX2Z65QQXBLuc7DdkLGUsaprkU44UhDjRxPY2wNIQYpsP0iSfZvdFstYnH9cA
DigAiFY1Tcwxpw8K6VF14QvgXfn2uxxCrCFDmpjjCYhrAjEIDWT7UY2CWNQ0MefbTBGEGdOw0NCv
KsYOD5Am5oz0qgJ4S2Nm14/qIFrVNDFnON04i11IZM4KeBdz0O8TUEQ3X5qY47xgbgjzBA+bsD8h
c0X3z/cu+lUE0ySfNZ5QgQgq82S0R8+9OWBChth3PkyTfJaJC/a+3YCk97Xn+b7/NdBFv1thmjB0
4IdmLve//kjXkg==""".decode('base64').decode('zlib')]
        self.suffix = '.toc'
class testflaccuesheet(testcuesheet):
    """Runs the cuesheet assertions against FLAC's embedded CUESHEET
    metadata block (audiotools.flac.Flac_CUESHEET).

    The literal below encodes the same 22 track disc as the parent class,
    plus the mandatory lead-out track (number 170) at the end; offsets are
    in PCM frames and the catalog number is padded to 128 bytes with NULs."""

    @LIB_CORE
    def setUp(self):
        self.sheet_class = audiotools.flac.Flac_CUESHEET
        self.suffix = '.flac'
        self.test_sheets = [
            audiotools.flac.Flac_CUESHEET(
                catalog_number='4580226563955\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
                lead_in_samples=88200,
                is_cdda=1,
                tracks=[audiotools.flac.Flac_CUESHEET_track(
                        offset=0,
                        number=1,
                        ISRC='JPG750800183',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=12280380,
                        number=2,
                        ISRC='JPG750800212',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=24807132,
                        number=3,
                        ISRC='JPG750800214',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 0),
                                      audiotools.flac.Flac_CUESHEET_index(130536, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=28954296,
                        number=4,
                        ISRC='JPG750800704',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 0),
                                      audiotools.flac.Flac_CUESHEET_index(135828, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=31019352,
                        number=5,
                        ISRC='JPG750800705',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=40957728,
                        number=6,
                        ISRC='JPG750800706',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=56111664,
                        number=7,
                        ISRC='JPG750800707',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=69543348,
                        number=8,
                        ISRC='JPG750800708',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 0),
                                      audiotools.flac.Flac_CUESHEET_index(93492, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=80537184,
                        number=9,
                        ISRC='JPG750800219',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=81398604,
                        number=10,
                        ISRC='JPG750800722',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 0),
                                      audiotools.flac.Flac_CUESHEET_index(78792, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=91970256,
                        number=11,
                        ISRC='JPG750800709',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=99292032,
                        number=12,
                        ISRC='JPG750800290',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=110377008,
                        number=13,
                        ISRC='JPG750800218',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=113040060,
                        number=14,
                        ISRC='JPG750800710',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 0),
                                      audiotools.flac.Flac_CUESHEET_index(75264, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=117804036,
                        number=15,
                        ISRC='JPG750800217',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=120531180,
                        number=16,
                        ISRC='JPG750800531',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=133673568,
                        number=17,
                        ISRC='JPG750800225',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=143108616,
                        number=18,
                        ISRC='JPG750800711',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 0),
                                      audiotools.flac.Flac_CUESHEET_index(98196, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=156345084,
                        number=19,
                        ISRC='JPG750800180',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 0),
                                      audiotools.flac.Flac_CUESHEET_index(81732, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=172052328,
                        number=20,
                        ISRC='JPG750800712',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 0),
                                      audiotools.flac.Flac_CUESHEET_index(197568, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=178101084,
                        number=21,
                        ISRC='JPG750800713',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 0),
                                      audiotools.flac.Flac_CUESHEET_index(135240, 1)]),
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=189107268,
                        number=22,
                        ISRC='JPG750800714',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[audiotools.flac.Flac_CUESHEET_index(0, 1)]),
                        # lead-out track: number 170, blank ISRC, no indexes
                        audiotools.flac.Flac_CUESHEET_track(
                        offset=191795016,
                        number=170,
                        ISRC='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
                        track_type=0,
                        pre_emphasis=0,
                        index_points=[])])]

    def sheets(self):
        """Yields each test sheet after embedding it in a freshly-encoded
        FLAC file and reading it back from that file's metadata."""
        for test_sheet in self.test_sheets:
            tempflacfile = tempfile.NamedTemporaryFile(suffix=self.suffix)
            try:
                tempflac = audiotools.FlacAudio.from_pcm(
                    tempflacfile.name,
                    EXACT_BLANK_PCM_Reader(191795016),
                    "1")
                metadata = tempflac.get_metadata()
                metadata.replace_blocks(
                    audiotools.flac.Flac_CUESHEET.BLOCK_ID,
                    [audiotools.flac.Flac_CUESHEET.converted(
                        test_sheet,
                        191795016)])
                tempflac.update_metadata(metadata)

                sheet = audiotools.open(
                    tempflacfile.name).get_metadata().get_block(
                    audiotools.flac.Flac_CUESHEET.BLOCK_ID)
            finally:
                tempflacfile.close()
            yield sheet
#takes several 1-channel PCMReaders and combines them into a single PCMReader
class PCM_Reader_Multiplexer:
    """Presents several 1-channel PCMReaders as one multi-channel PCMReader.

    Sample rate and bits-per-sample come from the first reader;
    the channel count equals the number of readers given."""

    def __init__(self, pcm_readers, channel_mask):
        first = pcm_readers[0]
        self.sample_rate = first.sample_rate
        self.bits_per_sample = first.bits_per_sample
        self.channels = len(pcm_readers)
        self.channel_mask = channel_mask
        # buffer each source so per-channel reads stay in lockstep
        self.buffers = [audiotools.BufferedPCMReader(r) for r in pcm_readers]

    def read(self, pcm_frames):
        """Reads up to pcm_frames from each channel and interleaves them
        into a single FrameList."""
        per_channel = [buf.read(pcm_frames) for buf in self.buffers]
        return audiotools.pcm.from_channels(per_channel)

    def close(self):
        """Closes every wrapped reader."""
        for buf in self.buffers:
            buf.close()
class TestMultiChannel(unittest.TestCase):
    """Tests multi-channel encoding, decoding and channel-mask handling
    across the supported audio formats."""

    def setUp(self):
        #these support the full range of ChannelMasks
        self.wav_channel_masks = [audiotools.WaveAudio,
                                  audiotools.WavPackAudio]

        #these support a subset of ChannelMasks up to 6 channels
        self.flac_channel_masks = [audiotools.FlacAudio,
                                   audiotools.OggFlacAudio]
        if (audiotools.m4a.M4AAudio_nero.has_binaries(audiotools.BIN)):
            self.flac_channel_masks.append(audiotools.m4a.M4AAudio_nero)

        #these support a reordered subset of ChannelMasks up to 8 channels
        self.vorbis_channel_masks = [audiotools.VorbisAudio,
                                     audiotools.OpusAudio]

    def __test_mask_blank__(self, audio_class, channel_mask):
        """Encodes blank audio with the given mask and asserts the mask
        survives an encode/decode round-trip."""
        temp_file = tempfile.NamedTemporaryFile(suffix="." + audio_class.SUFFIX)
        try:
            temp_track = audio_class.from_pcm(
                temp_file.name,
                PCM_Reader_Multiplexer(
                    [BLANK_PCM_Reader(2, channels=1)
                     for i in xrange(len(channel_mask))],
                    channel_mask))
            self.assertEqual(temp_track.channel_mask(), channel_mask,
                             "%s != %s for format %s" %
                             (temp_track.channel_mask(),
                              channel_mask,
                              audio_class.NAME))

            pcm = temp_track.to_pcm()
            self.assertEqual(int(pcm.channel_mask), int(channel_mask))
            # drain the decoder to make sure it runs to completion
            audiotools.transfer_framelist_data(pcm, lambda x: x)
            pcm.close()
        finally:
            temp_file.close()

    def __test_undefined_mask_blank__(self, audio_class, channels,
                                      should_be_blank):
        """Encodes with an undefined (0) mask; the result must either stay
        blank or be assigned a non-zero mask, per should_be_blank."""
        temp_file = tempfile.NamedTemporaryFile(suffix="." + audio_class.SUFFIX)
        try:
            temp_track = audio_class.from_pcm(
                temp_file.name,
                PCM_Reader_Multiplexer(
                    [BLANK_PCM_Reader(2, channels=1)
                     for i in xrange(channels)],
                    audiotools.ChannelMask(0)))
            self.assertEqual(temp_track.channels(), channels)
            if (should_be_blank):
                self.assertEqual(int(temp_track.channel_mask()), 0)
                pcm = temp_track.to_pcm()
                self.assertEqual(int(pcm.channel_mask), 0)
                audiotools.transfer_framelist_data(pcm, lambda x: x)
                pcm.close()
            else:
                # format fills in a default mask for this channel count
                self.assertNotEqual(int(temp_track.channel_mask()), 0,
                                    "mask = %s for format %s at %d channels" %
                                    (temp_track.channel_mask(),
                                     audio_class,
                                     channels))
                pcm = temp_track.to_pcm()
                self.assertEqual(int(pcm.channel_mask),
                                 int(temp_track.channel_mask()))
                audiotools.transfer_framelist_data(pcm, lambda x: x)
                pcm.close()
        finally:
            temp_file.close()

    def __test_error_mask_blank__(self, audio_class, channels,
                                  channel_mask):
        """Encoding with this unsupported mask must raise
        UnsupportedChannelMask."""
        temp_file = tempfile.NamedTemporaryFile(
            suffix="." + audio_class.SUFFIX)
        try:
            self.assertRaises(audiotools.UnsupportedChannelMask,
                              audio_class.from_pcm,
                              temp_file.name,
                              PCM_Reader_Multiplexer(
                    [BLANK_PCM_Reader(2, channels=1)
                     for i in xrange(channels)],
                    channel_mask))
        finally:
            temp_file.close()

    def __test_error_channel_count__(self, audio_class, channels,
                                     channel_mask):
        """Encoding with this unsupported channel count must raise
        UnsupportedChannelCount."""
        temp_file = tempfile.NamedTemporaryFile(
            suffix="." + audio_class.SUFFIX)
        try:
            self.assertRaises(audiotools.UnsupportedChannelCount,
                              audio_class.from_pcm,
                              temp_file.name,
                              PCM_Reader_Multiplexer(
                    [BLANK_PCM_Reader(2, channels=1)
                     for i in xrange(channels)],
                    channel_mask))
        finally:
            temp_file.close()

    def __test_pcm_conversion__(self,
                                source_audio_class,
                                target_audio_class,
                                channel_mask):
        """Checks that the channel mask survives source -> target
        conversion, both via to_pcm()/from_pcm() and via convert()."""
        source_file = tempfile.NamedTemporaryFile(suffix="." + source_audio_class.SUFFIX)
        target_file = tempfile.NamedTemporaryFile(suffix="." + target_audio_class.SUFFIX)
        wav_file = tempfile.NamedTemporaryFile(suffix=".wav")
        try:
            source_track = source_audio_class.from_pcm(
                source_file.name,
                PCM_Reader_Multiplexer(
                    [BLANK_PCM_Reader(2, channels=1)
                     for i in xrange(len(channel_mask))],
                    channel_mask))
            self.assertEqual(source_track.channel_mask(), channel_mask)

            source_pcm = source_track.to_pcm()

            self.assertEqual(isinstance(source_pcm.channel_mask, int),
                             True,
                             "%s's to_pcm() PCMReader is not an int" % \
                                 (source_audio_class.NAME))

            target_track = target_audio_class.from_pcm(
                target_file.name,
                source_pcm)

            self.assertEqual(target_track.channel_mask(), channel_mask)
            self.assertEqual(source_track.channel_mask(),
                             target_track.channel_mask())

            # round-trip through a .wav intermediary via convert()
            source_track.convert(wav_file.name, audiotools.WaveAudio)
            wav = audiotools.open(wav_file.name)
            wav.verify()
            self.assertEqual(source_track.channel_mask(),
                             wav.channel_mask())
            # NOTE(review): converts to WaveAudio rather than
            # target_audio_class here - possibly intentional, verify
            target_track = wav.convert(target_file.name,
                                       audiotools.WaveAudio)
            self.assertEqual(target_track.channel_mask(), channel_mask)
            self.assertEqual(source_track.channel_mask(),
                             target_track.channel_mask())
        finally:
            source_file.close()
            target_file.close()
            wav_file.close()

    def __test_assignment__(self, audio_class, tone_tracks, channel_mask):
        """Encodes one distinct tone per channel and verifies each tone
        lands in the channel the mask assigns it to, using per-channel
        ReplayGain measurements."""
        from audiotools import replaygain as replaygain

        self.assertEqual(len(tone_tracks), len(channel_mask))
        temp_file = tempfile.NamedTemporaryFile(suffix="." + audio_class.SUFFIX)

        try:
            temp_track = audio_class.from_pcm(
                temp_file.name,
                PCM_Reader_Multiplexer([t.to_pcm() for t in tone_tracks],
                                       channel_mask))

            # one gain measurement per channel, isolated via RemaskedPCMReader
            gain_values = [
                replaygain.ReplayGain(temp_track.sample_rate()).title_gain(
                    audiotools.RemaskedPCMReader(temp_track.to_pcm(),
                                                 1,
                                                 mask))[0]
                for mask in
                [int(audiotools.ChannelMask.from_fields(
                            **{channel_name:True}))
                 for channel_name in
                 channel_mask.channels()]]

            # the input tones themselves are strictly decreasing in loudness
            self.assertEqual(set([True]),
                             set([prev.replay_gain().track_gain >
                                  curr.replay_gain().track_gain
                                  for (prev, curr) in
                                  zip(tone_tracks, tone_tracks[1:])]))

            # so the decoded channels must be too, if assignment is correct
            self.assertEqual(set([True]),
                             set([prev > curr for (prev, curr) in
                                  zip(gain_values, gain_values[1:])]),
                             "channel mismatch for mask %s with format %s (gain values %s)" % (channel_mask, audio_class.NAME, gain_values))
        finally:
            temp_file.close()

    @LIB_CORE
    def test_channel_mask(self):
        """Round-trips progressively larger channel masks through each
        format family that supports them."""
        from_fields = audiotools.ChannelMask.from_fields

        # masks supported by every multi-channel-capable family
        for audio_class in (self.wav_channel_masks +
                            self.flac_channel_masks +
                            self.vorbis_channel_masks):
            for mask in [from_fields(front_center=True),
                         from_fields(front_left=True,
                                     front_right=True),
                         from_fields(front_left=True,
                                     front_right=True,
                                     front_center=True),
                         from_fields(front_right=True,
                                     front_left=True,
                                     back_right=True,
                                     back_left=True),
                         from_fields(front_right=True,
                                     front_center=True,
                                     front_left=True,
                                     back_right=True,
                                     back_left=True),
                         from_fields(front_right=True,
                                     front_center=True,
                                     low_frequency=True,
                                     front_left=True,
                                     back_right=True,
                                     back_left=True)]:
                self.__test_mask_blank__(audio_class, mask)

        # 7/8 channel masks: wav + vorbis families only
        for audio_class in (self.wav_channel_masks +
                            self.vorbis_channel_masks):
            for mask in [from_fields(front_left=True, front_right=True,
                                     front_center=True,
                                     side_left=True, side_right=True,
                                     back_center=True, low_frequency=True),
                         from_fields(front_left=True, front_right=True,
                                     side_left=True, side_right=True,
                                     back_left=True, back_right=True,
                                     front_center=True, low_frequency=True)]:
                self.__test_mask_blank__(audio_class, mask)

        # 8/9 channel masks: wav family only
        for audio_class in self.wav_channel_masks:
            for mask in [from_fields(front_left=True, front_right=True,
                                     side_left=True, side_right=True,
                                     back_left=True, back_right=True,
                                     front_center=True, back_center=True,
                                     low_frequency=True),
                         from_fields(front_left=True, front_right=True,
                                     side_left=True, side_right=True,
                                     back_left=True, back_right=True,
                                     front_center=True, back_center=True)]:
                self.__test_mask_blank__(audio_class, mask)

    @LIB_CORE
    def test_channel_mask_conversion(self):
        """Checks that channel masks survive cross-format conversion for
        every compatible source/target pairing."""
        from_fields = audiotools.ChannelMask.from_fields

        # plain stereo must convert between every available type
        for source_audio_class in audiotools.AVAILABLE_TYPES:
            for target_audio_class in audiotools.AVAILABLE_TYPES:
                self.__test_pcm_conversion__(source_audio_class,
                                             target_audio_class,
                                             from_fields(front_left=True,
                                                         front_right=True))

        for source_audio_class in (self.wav_channel_masks +
                                   self.flac_channel_masks +
                                   self.vorbis_channel_masks):
            for target_audio_class in (self.wav_channel_masks +
                                       self.flac_channel_masks +
                                       self.vorbis_channel_masks):
                for mask in [from_fields(front_center=True),
                             from_fields(front_left=True,
                                         front_right=True),
                             from_fields(front_left=True,
                                         front_right=True,
                                         front_center=True),
                             from_fields(front_right=True,
                                         front_left=True,
                                         back_right=True,
                                         back_left=True),
                             from_fields(front_right=True,
                                         front_center=True,
                                         front_left=True,
                                         back_right=True,
                                         back_left=True),
                             from_fields(front_right=True,
                                         front_center=True,
                                         low_frequency=True,
                                         front_left=True,
                                         back_right=True,
                                         back_left=True)]:
                    self.__test_pcm_conversion__(source_audio_class,
                                                 target_audio_class,
                                                 mask)

        for source_audio_class in (self.wav_channel_masks +
                                   self.vorbis_channel_masks):
            for target_audio_class in (self.wav_channel_masks +
                                       self.vorbis_channel_masks):
                for mask in [from_fields(front_left=True, front_right=True,
                                         front_center=True,
                                         side_left=True, side_right=True,
                                         back_center=True, low_frequency=True),
                             from_fields(front_left=True, front_right=True,
                                         side_left=True, side_right=True,
                                         back_left=True, back_right=True,
                                         front_center=True, low_frequency=True)]:
                    self.__test_pcm_conversion__(source_audio_class,
                                                 target_audio_class,
                                                 mask)

        for source_audio_class in self.wav_channel_masks:
            for target_audio_class in self.wav_channel_masks:
                for mask in [from_fields(front_left=True, front_right=True,
                                         side_left=True, side_right=True,
                                         back_left=True, back_right=True,
                                         front_center=True, back_center=True,
                                         low_frequency=True),
                             from_fields(front_left=True, front_right=True,
                                         side_left=True, side_right=True,
                                         back_left=True, back_right=True,
                                         front_center=True, back_center=True)]:
                    self.__test_pcm_conversion__(source_audio_class,
                                                 target_audio_class,
                                                 mask)

    @LIB_CORE
    def test_channel_assignment(self):
        """Uses distinct pre-built tone files (tone1.flac..tone8.flac) to
        verify channels are not swapped during encoding."""
        from_fields = audiotools.ChannelMask.from_fields

        TONE_TRACKS = map(audiotools.open,
                          ["tone%d.flac" % (i + 1) for i in xrange(8)])

        for audio_class in audiotools.AVAILABLE_TYPES:
            self.__test_assignment__(audio_class,
                                     TONE_TRACKS[0:2],
                                     from_fields(front_left=True,
                                                 front_right=True))

        for audio_class in (self.wav_channel_masks +
                            self.flac_channel_masks +
                            self.vorbis_channel_masks):
            for mask in [from_fields(front_left=True,
                                     front_right=True,
                                     front_center=True),
                         from_fields(front_right=True,
                                     front_left=True,
                                     back_right=True,
                                     back_left=True),
                         from_fields(front_right=True,
                                     front_center=True,
                                     front_left=True,
                                     back_right=True,
                                     back_left=True),
                         from_fields(front_right=True,
                                     front_center=True,
                                     low_frequency=True,
                                     front_left=True,
                                     back_right=True,
                                     back_left=True)]:

                #Encoding 6 channel audio with neroAacEnc
                #with this batch of tones causes Nero to essentially
                #zero out the LFE channel,
                #as does newer versions of oggenc.
                #This is likely due to the characteristics of
                #my input samples.
                if ((len(mask) == 6) and
                    ((audio_class is audiotools.m4a.M4AAudio_nero) or
                     (audio_class is audiotools.VorbisAudio))):
                    continue

                self.__test_assignment__(audio_class,
                                         TONE_TRACKS[0:len(mask)],
                                         mask)

        for audio_class in (self.wav_channel_masks +
                            self.vorbis_channel_masks):
            for mask in [from_fields(front_left=True, front_right=True,
                                     front_center=True,
                                     side_left=True, side_right=True,
                                     back_center=True, low_frequency=True),
                         from_fields(front_left=True, front_right=True,
                                     side_left=True, side_right=True,
                                     back_left=True, back_right=True,
                                     front_center=True, low_frequency=True)]:
                self.__test_assignment__(audio_class,
                                         TONE_TRACKS[0:len(mask)],
                                         mask)

        for audio_class in self.wav_channel_masks:
            for mask in [from_fields(front_left=True, front_right=True,
                                     side_left=True, side_right=True,
                                     back_left=True, back_right=True,
                                     front_center=True, back_center=True)]:
                self.__test_assignment__(audio_class,
                                         TONE_TRACKS[0:len(mask)],
                                         mask)

        # for mask in [from_fields(front_left=True, front_right=True),
        #              from_fields(front_left=True, front_right=True,
        #                          back_left=True, back_right=True),
        #              from_fields(front_left=True, side_left=True,
        #                          front_center=True, front_right=True,
        #                          side_right=True, back_center=True)]:
        #     self.__test_assignment__(audiotools.AiffAudio,
        #                              TONE_TRACKS[0:len(mask)],
        #                              mask)

    @LIB_CORE
    def test_unsupported_channel_mask_from_pcm(self):
        """Checks each format's behavior when given channel counts with
        no defined mask: defaulted, blanked, collapsed or rejected."""

        for channels in xrange(1, 6 + 1):
            self.__test_undefined_mask_blank__(audiotools.WaveAudio,
                                               channels,
                                               False)

        for channels in xrange(1, 3):
            self.__test_undefined_mask_blank__(audiotools.WavPackAudio,
                                               channels,
                                               False)

        for channels in xrange(3, 21):
            self.__test_undefined_mask_blank__(audiotools.WavPackAudio,
                                               channels,
                                               True)

        for channels in xrange(1, 9):
            self.__test_undefined_mask_blank__(audiotools.ALACAudio,
                                               channels,
                                               False)

        for channels in xrange(9, 21):
            self.__test_undefined_mask_blank__(audiotools.ALACAudio,
                                               channels,
                                               True)

        for audio_class in [audiotools.FlacAudio, audiotools.OggFlacAudio]:
            for channels in xrange(1, 7):
                self.__test_undefined_mask_blank__(audio_class,
                                                   channels,
                                                   False)
            for channels in xrange(7, 9):
                self.__test_undefined_mask_blank__(audio_class,
                                                   channels,
                                                   True)

            # FLAC rejects more than 8 channels outright
            self.__test_error_channel_count__(audio_class,
                                              9, audiotools.ChannelMask(0))
            self.__test_error_channel_count__(audio_class,
                                              10, audiotools.ChannelMask(0))

        # stereo-only formats collapse excess channels down to 2
        for stereo_audio_class in [audiotools.MP3Audio,
                                   audiotools.MP2Audio,
                                   audiotools.m4a.M4AAudio_faac]:

            self.__test_undefined_mask_blank__(stereo_audio_class,
                                               2, False)
            for channels in xrange(3, 20):
                temp_file = tempfile.NamedTemporaryFile(suffix="." + stereo_audio_class.SUFFIX)
                try:
                    temp_track = stereo_audio_class.from_pcm(
                        temp_file.name,
                        PCM_Reader_Multiplexer(
                            [BLANK_PCM_Reader(2, channels=1)
                             for i in xrange(channels)],
                            audiotools.ChannelMask(0)))
                    self.assertEqual(temp_track.channels(), 2)
                    self.assertEqual(int(temp_track.channel_mask()),
                                     int(audiotools.ChannelMask.from_fields(
                                front_left=True, front_right=True)))
                    pcm = temp_track.to_pcm()
                    self.assertEqual(int(pcm.channel_mask),
                                     int(temp_track.channel_mask()))
                    audiotools.transfer_framelist_data(pcm, lambda x: x)
                    pcm.close()
                finally:
                    temp_file.close()

        for channels in xrange(1, 9):
            self.__test_undefined_mask_blank__(audiotools.VorbisAudio,
                                               channels,
                                               False)

        for channels in xrange(9, 20):
            self.__test_undefined_mask_blank__(audiotools.VorbisAudio,
                                               channels,
                                               True)

        for channels in [1, 2]:
            self.__test_undefined_mask_blank__(audiotools.AiffAudio,
                                               channels,
                                               False)

        for channels in [3, 4, 5, 6, 7, 8, 9, 10]:
            self.__test_undefined_mask_blank__(audiotools.AiffAudio,
                                               channels,
                                               True)

        for channels in [1, 2]:
            self.__test_undefined_mask_blank__(audiotools.AuAudio,
                                               channels,
                                               False)

        for channels in xrange(3, 11):
            self.__test_undefined_mask_blank__(audiotools.AuAudio,
                                               channels,
                                               True)

        if (audiotools.m4a.M4AAudio_nero.has_binaries(audiotools.BIN)):
            for channels in xrange(1, 7):
                self.__test_undefined_mask_blank__(audiotools.m4a.M4AAudio_nero,
                                                   channels,
                                                   False)
class __callback__:
    """Minimal helper that records whether its call() method has fired.

    Used as a next_track_callback target in the player tests below."""

    def __init__(self):
        # flips to True the first time call() is invoked
        self.called = False

    def call(self):
        # remember that the callback fired
        self.called = True
class Test_Player(unittest.TestCase):
    """Tests audiotools.player.Player with a short FLAC track and a
    null audio output.  These tests depend on wall-clock sleeps, so the
    exact progress values are only compared relative to each other."""

    @LIB_PLAYER
    def setUp(self):
        # a 6 second blank FLAC track to play
        self.temp_track_file = tempfile.NamedTemporaryFile(suffix=".flac")
        self.temp_track = audiotools.FlacAudio.from_pcm(
            self.temp_track_file.name,
            BLANK_PCM_Reader(6))

    @LIB_PLAYER
    def tearDown(self):
        self.temp_track_file.close()

    @LIB_PLAYER
    def test_player(self):
        """Plays the track and verifies progress advances while playing,
        freezes while paused, and the end-of-track callback fires."""
        import audiotools.player
        import time

        callback = __callback__()
        player = audiotools.player.Player(audiotools.player.NULLAudioOutput(),
                                          next_track_callback=callback.call)
        # nothing playing yet
        self.assertEqual(callback.called, False)
        self.assertEqual(player.progress(), (0, 0))
        player.open(self.temp_track)
        player.play()
        time.sleep(1)
        (current1, total1) = player.progress()
        self.assertEqual(callback.called, False)
        self.assert_(current1 > 0)
        self.assert_(total1 > 0)
        time.sleep(1)
        # progress advances while playing; total stays fixed
        (current2, total2) = player.progress()
        self.assertEqual(callback.called, False)
        self.assert_(current2 > current1)
        self.assertEqual(total2, total1)
        time.sleep(1)
        player.pause()
        time.sleep(.5)
        # current3 reflects the extra second played before pausing
        (current3, total3) = player.progress()
        self.assertEqual(callback.called, False)
        self.assert_(current3 > current2)
        self.assertEqual(total3, total1)
        time.sleep(1)
        # while paused, progress must not advance
        (current4, total4) = player.progress()
        self.assertEqual(callback.called, False)
        self.assertEqual(current4, current3)
        self.assertEqual(total4, total1)
        player.play()
        # let the 6 second track run out; callback must have fired
        time.sleep(6)
        self.assertEqual(callback.called, True)
        player.close()
class Test_CDPlayer(unittest.TestCase):
    """Tests audiotools.player.CDPlayer against a small generated
    3-track CD image (cuesheet + raw PCM).  Like Test_Player, these
    tests rely on wall-clock sleeps and compare progress values only
    relative to each other."""

    @LIB_PLAYER
    def setUp(self):
        """Builds an 18 second CDImage.cue/CDImage.bin pair in a scratch
        directory and opens it as a CDDA object."""
        self.input_dir = tempfile.mkdtemp()
        # 793800 frames at 44100Hz = 18 seconds of stereo sine
        self.stream = test_streams.Sine16_Stereo(793800, 44100,
                                                 8820.0, 0.70,
                                                 4410.0, 0.29, 1.0)
        self.cue_file = os.path.join(self.input_dir, "CDImage.cue")
        self.bin_file = os.path.join(self.input_dir, "CDImage.bin")

        # the cuesheet is text, so text mode is fine here
        # NOTE(review): "INDEX 00 00:9:00" looks like a malformed
        # MM:SS:FF timestamp (single-digit seconds) - confirm the cue
        # parser accepts it before normalizing
        f = open(self.cue_file, "w")
        f.write('FILE "CDImage.wav" WAVE\r\n  TRACK 01 AUDIO\r\n    ISRC JPPI00652340\r\n    INDEX 01 00:00:00\r\n  TRACK 02 AUDIO\r\n    ISRC JPPI00652349\r\n    INDEX 00 00:06:00\r\n    INDEX 01 00:08:00\r\n  TRACK 03 AUDIO\r\n    ISRC JPPI00652341\r\n    INDEX 00 00:9:00\r\n    INDEX 01 00:11:00\r\n')
        f.close()

        # the image holds raw PCM data, so it must be written in
        # binary mode ("wb") - text mode would corrupt it on platforms
        # that perform newline translation
        f = open(self.bin_file, "wb")
        audiotools.transfer_framelist_data(self.stream, f.write)
        f.close()

        self.cdda = audiotools.CDDA(self.cue_file)

    @LIB_PLAYER
    def tearDown(self):
        """Removes the scratch directory and everything in it."""
        for f in os.listdir(self.input_dir):
            os.unlink(os.path.join(self.input_dir, f))
        os.rmdir(self.input_dir)

    @LIB_PLAYER
    def test_player(self):
        """Plays track 1 and verifies progress advances while playing,
        freezes while paused, and the end-of-track callback fires."""
        import audiotools.player
        import time

        callback = __callback__()
        player = audiotools.player.CDPlayer(
            self.cdda,
            audiotools.player.NULLAudioOutput(),
            next_track_callback=callback.call)
        # nothing playing yet
        self.assertEqual(callback.called, False)
        self.assertEqual(player.progress(), (0, 0))
        player.open(1)
        player.play()
        time.sleep(1)
        (current1, total1) = player.progress()
        self.assertEqual(callback.called, False)
        self.assert_(current1 > 0)
        self.assert_(total1 > 0)
        time.sleep(1)
        # progress advances while playing; total stays fixed
        (current2, total2) = player.progress()
        self.assertEqual(callback.called, False)
        self.assert_(current2 > current1)
        self.assertEqual(total2, total1)
        time.sleep(1)
        player.pause()
        time.sleep(.5)
        # current3 reflects the extra second played before pausing
        (current3, total3) = player.progress()
        self.assertEqual(callback.called, False)
        self.assert_(current3 > current2)
        self.assertEqual(total3, total1)
        time.sleep(1)
        # while paused, progress must not advance
        (current4, total4) = player.progress()
        self.assertEqual(callback.called, False)
        self.assertEqual(current4, current3)
        self.assertEqual(total4, total1)
        player.play()
        # let the track run out; callback must have fired
        time.sleep(6)
        self.assertEqual(callback.called, True)
        player.close()
|
Excito/audiotools
|
test/test_core.py
|
Python
|
gpl-2.0
| 195,644
|
[
"Brian"
] |
cf7ad2cf58881aa85974d5aa59796763b047d02b53d04df06170e92f493b36b1
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
import shutil
class Gaussian(Package):
    """Gaussian is a computer program for computational chemistry"""

    homepage = "http://www.gaussian.com/"
    url = "file://{0}/g09.tgz".format(os.getcwd())

    version('09', '7d4c95b535e68e48af183920df427e4e')

    def install(self, spec, prefix):
        """Copy the unpacked g09 tree into ``prefix.bin`` and patch the
        hard-coded paths the vendor scripts carry over from the build host.

        ``filter_file(..., string=True)`` treats the pattern as a literal
        string rather than a regex.  (Bug fix: the previous code passed the
        *string* ``'True'``, which only worked by accident of truthiness.)
        """
        shutil.copytree(os.getcwd(), prefix.bin)

        # Scripts that embed the packager's original install root.
        # NOTE(review): isfile() is checked relative to the current working
        # directory (the unpacked source tree), while the patched copy lives
        # under prefix.bin -- confirm that is the intent.
        patch_install_files = ['flc',
                               'linda8.2/opteron-linux/bin/flc',
                               'linda8.2/opteron-linux/bin/LindaLauncher',
                               'linda8.2/opteron-linux/bin/ntsnet',
                               'linda8.2/opteron-linux/bin/pmbuild',
                               'linda8.2/opteron-linux/bin/vntsnet',
                               'ntsnet'
                               ]
        for filename in patch_install_files:
            if os.path.isfile(filename):
                filter_file('/mf/frisch/g09', prefix.bin,
                            join_path(prefix.bin, filename), string=True)

        # Linda launcher scripts additionally hard-code /usr/bin/linda.
        patch_install_files = ['linda8.2/opteron-linux/bin/ntsnet',
                               'linda8.2/opteron-linux/bin/vntsnet',
                               ]
        for filename in patch_install_files:
            if os.path.isfile(filename):
                filter_file('/usr/bin/linda', prefix.bin,
                            join_path(prefix.bin, filename), string=True)

    def setup_environment(self, spack_env, run_env):
        """Publish the runtime environment Gaussian expects (g09root,
        exe/basis/arch/bsd dirs) and the Linda shared-library path."""
        run_env.set('g09root', self.prefix)
        run_env.set('GAUSSIANHOME', self.prefix)
        run_env.set('GAUSS_EXEDIR', self.prefix.bin)
        run_env.set('G09_BASIS', join_path(self.prefix.bin, 'basis'))
        run_env.set('GAUSS_LEXEDIR', join_path(self.prefix.bin,
                    'linda-exe'))
        run_env.set('GAUSS_ARCHDIR', join_path(self.prefix.bin, 'arch'))
        run_env.set('GAUSS_BSDDIR', join_path(self.prefix.bin, 'bsd'))
        run_env.prepend_path('LD_LIBRARY_PATH', join_path(self.prefix.bin,
                             'linda8.2/opteron-linux/lib'))
        run_env.prepend_path('LD_LIBRARY_PATH', self.prefix.bin)
|
wscullin/spack
|
var/spack/repos/builtin/packages/gaussian/package.py
|
Python
|
lgpl-2.1
| 3,390
|
[
"Gaussian"
] |
8059ee52ce8547504d79cfbe6882ec355d67431e3fadd5bdd2c7fca7d128233a
|
# Copyright (C) 2018
# Max Planck Institute for Polymer Research
# Copyright (C) 2016
# Jakub Krajniak (jkrajniak at gmail.com)
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
****************************************
espressopp.interaction.TabulatedDihedral
****************************************
Calculates energies and forces for a dihedral tabulated potential.
In the tabulated potential file, angles should be in radians, and
the file should cover the range -pi radians to +pi radians (-180 to
+180 degrees).
Note that this class has only been tested for symmetric tabulated
potentials.
.. function:: espressopp.interaction.TabulatedDihedral(itype, filename)
:param itype: The interpolation type: 1 - linear, 2 - akima spline, 3 - cubic spline
:param filename: The tabulated potential filename.
:type itype: int
:type filename: str
.. function:: espressopp.interaction.FixedQuadrupleListTabulatedDihedral(system, fql, potential)
:param system: The Espresso++ system object.
:param fql: The FixedQuadrupleList.
:param potential: The potential.
:type system: espressopp.System
:type fql: espressopp.FixedQuadrupleList
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.FixedQuadrupleListTabulatedDihedral.setPotential(potential)
:param potential: The potential object.
:type potential: espressopp.interaction.Potential
.. function:: espressopp.interaction.FixedQuadrupleListTypesTabulatedDihedral(system, fql)
        :param system: The Espresso++ system object.
        :type system: espressopp.System
        :param fql: The FixedQuadrupleList list.
        :type fql: espressopp.FixedQuadrupleList
.. function:: espressopp.interaction.FixedQuadrupleListTypesTabulatedDihedral(system, ftl)
:param system: The Espresso++ system object.
:type system: espressopp.System
:param ftl: The FixedQuadruple list.
:type ftl: espressopp.FixedQuadrupleList
.. function:: espressopp.interaction.FixedQuadrupleListTypesTabulatedDihedral.setPotential(type1, type2, type3, type4, potential)
Defines dihedral potential for interaction between particles of types type1-type2-type3-type4.
:param type1: Type of particle 1.
:type type1: int
:param type2: Type of particle 2.
:type type2: int
:param type3: Type of particle 3.
:type type3: int
:param type4: Type of particle 4.
:type type4: int
:param potential: The potential to set up.
:type potential: espressopp.interaction.DihedralPotential
"""
from espressopp import pmi
from espressopp.esutil import *
from espressopp.interaction.DihedralPotential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_TabulatedDihedral, \
interaction_FixedQuadrupleListTabulatedDihedral, \
interaction_FixedQuadrupleListTypesTabulatedDihedral
class TabulatedDihedralLocal(DihedralPotentialLocal, interaction_TabulatedDihedral):
    """Worker-side tabulated dihedral potential.

    :param itype: interpolation type (1 linear, 2 akima spline, 3 cubic spline)
    :param filename: tabulated potential file name
    """
    def __init__(self, itype, filename):
        # Consistency fix: use pmi.workerIsActive() like
        # FixedQuadrupleListTypesTabulatedDihedralLocal in this module,
        # instead of repeating the manual _PMIComm/getMPIcpugroup guard.
        # NOTE(review): assumed equivalent to the manual rank check -- confirm
        # against espressopp.pmi.workerIsActive().
        if pmi.workerIsActive():
            cxxinit(self, interaction_TabulatedDihedral, itype, filename)
class FixedQuadrupleListTabulatedDihedralLocal(InteractionLocal, interaction_FixedQuadrupleListTabulatedDihedral):
    """Worker-side dihedral interaction over an explicit FixedQuadrupleList.

    :param system: the Espresso++ system object
    :param fql: the FixedQuadrupleList of particle quadruples
    :param potential: the tabulated dihedral potential to apply
    """
    def __init__(self, system, fql, potential):
        # Consistency fix: use pmi.workerIsActive() like
        # FixedQuadrupleListTypesTabulatedDihedralLocal in this module,
        # instead of repeating the manual _PMIComm/getMPIcpugroup guard.
        # NOTE(review): assumed equivalent to the manual rank check -- confirm
        # against espressopp.pmi.workerIsActive().
        if pmi.workerIsActive():
            cxxinit(self, interaction_FixedQuadrupleListTabulatedDihedral, system, fql, potential)

    def setPotential(self, potential):
        """Replace the potential applied to every quadruple in the list."""
        if pmi.workerIsActive():
            self.cxxclass.setPotential(self, potential)
class FixedQuadrupleListTypesTabulatedDihedralLocal(InteractionLocal, interaction_FixedQuadrupleListTypesTabulatedDihedral):
    """Worker-side dihedral interaction that selects the potential by the
    particle-type quadruple (type1-type2-type3-type4).

    All methods are no-ops on inactive workers (pmi.workerIsActive() guard).
    """
    def __init__(self, system, fql):
        if pmi.workerIsActive():
            cxxinit(self, interaction_FixedQuadrupleListTypesTabulatedDihedral, system, fql)

    def setPotential(self, type1, type2, type3, type4, potential):
        # Assign `potential` to the given type quadruple.
        if pmi.workerIsActive():
            self.cxxclass.setPotential(self, type1, type2, type3, type4, potential)

    def getPotential(self, type1, type2, type3, type4):
        # Returns the potential registered for the type quadruple
        # (None on inactive workers, since the guard skips the return).
        if pmi.workerIsActive():
            return self.cxxclass.getPotential(self, type1, type2, type3, type4)

    def setFixedQuadrupleList(self, fixedlist):
        if pmi.workerIsActive():
            self.cxxclass.setFixedQuadrupleList(self, fixedlist)

    def getFixedQuadrupleList(self):
        if pmi.workerIsActive():
            return self.cxxclass.getFixedQuadrupleList(self)
if pmi.isController:
    # Controller-side proxy classes: pmi.Proxy forwards property access and
    # the listed calls to the *Local counterparts running on the workers.
    # NOTE(review): __metaclass__ assignment is Python 2 syntax; it has no
    # effect under Python 3 -- confirm which interpreter this targets.
    class TabulatedDihedral(DihedralPotential):
        'The TabulatedDihedral potential.'
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.TabulatedDihedralLocal',
            pmiproperty = ['itype', 'filename']
            )

    class FixedQuadrupleListTabulatedDihedral(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedQuadrupleListTabulatedDihedralLocal',
            pmicall = ['setPotential', 'getFixedQuadrupleList']
            )

    class FixedQuadrupleListTypesTabulatedDihedral(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedQuadrupleListTypesTabulatedDihedralLocal',
            pmicall = ['setPotential','getPotential','setFixedQuadrupleList','getFixedQuadrupleList']
            )
|
niktre/espressopp
|
src/interaction/TabulatedDihedral.py
|
Python
|
gpl-3.0
| 6,508
|
[
"ESPResSo"
] |
e61e069d5380cac9a53f6f017b26a90537fc1a32696e6e37b8b601a1dd14dde4
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Documentation is intended to be processed by Epydoc.
"""
Introduction
============
The Munkres module provides an implementation of the Munkres algorithm
(also called the Hungarian algorithm or the Kuhn-Munkres algorithm),
useful for solving the Assignment Problem.
Assignment Problem
==================
Let *C* be an *n*\ x\ *n* matrix representing the costs of each of *n* workers
to perform any of *n* jobs. The assignment problem is to assign jobs to
workers in a way that minimizes the total cost. Since each worker can perform
only one job and each job can be assigned to only one worker the assignments
represent an independent set of the matrix *C*.
One way to generate the optimal set is to create all permutations of
the indexes necessary to traverse the matrix so that no row and column
are used more than once. For instance, given this matrix (expressed in
Python)::
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
You could use this code to generate the traversal indexes::
def permute(a, results):
if len(a) == 1:
results.insert(len(results), a)
else:
for i in range(0, len(a)):
element = a[i]
a_copy = [a[j] for j in range(0, len(a)) if j != i]
subresults = []
permute(a_copy, subresults)
for subresult in subresults:
result = [element] + subresult
results.insert(len(results), result)
results = []
permute(range(len(matrix)), results) # [0, 1, 2] for a 3x3 matrix
After the call to permute(), the results matrix would look like this::
[[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 0, 1],
[2, 1, 0]]
You could then use that index matrix to loop over the original cost matrix
and calculate the smallest cost of the combinations::
n = len(matrix)
minval = sys.maxsize
for row in range(n):
cost = 0
for col in range(n):
cost += matrix[row][col]
minval = min(cost, minval)
print minval
While this approach works fine for small matrices, it does not scale. It
executes in O(*n*!) time: Calculating the permutations for an *n*\ x\ *n*
matrix requires *n*! operations. For a 12x12 matrix, that's 479,001,600
traversals. Even if you could manage to perform each traversal in just one
millisecond, it would still take more than 133 hours to perform the entire
traversal. A 20x20 matrix would take 2,432,902,008,176,640,000 operations. At
an optimistic millisecond per operation, that's more than 77 million years.
The Munkres algorithm runs in O(*n*\ ^3) time, rather than O(*n*!). This
package provides an implementation of that algorithm.
This version is based on
http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html.
This version was written for Python by Brian Clapper from the (Ada) algorithm
at the above web site. (The ``Algorithm::Munkres`` Perl version, in CPAN, was
clearly adapted from the same web site.)
Usage
=====
Construct a Munkres object::
from munkres import Munkres
m = Munkres()
Then use it to compute the lowest cost assignment from a cost matrix. Here's
a sample program::
from munkres import Munkres, print_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
m = Munkres()
indexes = m.compute(matrix)
print_matrix(matrix, msg='Lowest cost through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total cost: %d' % total
Running that program produces::
Lowest cost through this matrix:
[5, 9, 1]
[10, 3, 2]
[8, 7, 4]
(0, 0) -> 5
(1, 1) -> 3
(2, 2) -> 4
    total cost: 12
The instantiated Munkres object can be used multiple times on different
matrices.
Non-square Cost Matrices
========================
The Munkres algorithm assumes that the cost matrix is square. However, it's
possible to use a rectangular matrix if you first pad it with 0 values to make
it square. This module automatically pads rectangular cost matrices to make
them square.
Notes:
- The module operates on a *copy* of the caller's matrix, so any padding will
not be seen by the caller.
- The cost matrix must be rectangular or square. An irregular matrix will
*not* work.
Calculating Profit, Rather than Cost
====================================
The cost matrix is just that: A cost matrix. The Munkres algorithm finds
the combination of elements (one from each row and column) that results in
the smallest cost. It's also possible to use the algorithm to maximize
profit. To do that, however, you have to convert your profit matrix to a
cost matrix. The simplest way to do that is to subtract all elements from a
large value. For example::
from munkres import Munkres, print_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
cost_matrix = []
for row in matrix:
cost_row = []
for col in row:
cost_row += [sys.maxsize - col]
cost_matrix += [cost_row]
m = Munkres()
indexes = m.compute(cost_matrix)
print_matrix(matrix, msg='Highest profit through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total profit=%d' % total
Running that program produces::
Highest profit through this matrix:
[5, 9, 1]
[10, 3, 2]
[8, 7, 4]
(0, 1) -> 9
(1, 0) -> 10
(2, 2) -> 4
total profit=23
The ``munkres`` module provides a convenience method for creating a cost
matrix from a profit matrix. Since it doesn't know whether the matrix contains
floating point numbers, decimals, or integers, you have to provide the
conversion function; but the convenience method takes care of the actual
creation of the cost matrix::
import munkres
cost_matrix = munkres.make_cost_matrix(matrix,
lambda cost: sys.maxsize - cost)
So, the above profit-calculation program can be recast as::
from munkres import Munkres, print_matrix, make_cost_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
cost_matrix = make_cost_matrix(matrix, lambda cost: sys.maxsize - cost)
m = Munkres()
indexes = m.compute(cost_matrix)
        print_matrix(matrix, msg='Highest profit through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
print '(%d, %d) -> %d' % (row, column, value)
print 'total profit=%d' % total
References
==========
1. http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html
2. Harold W. Kuhn. The Hungarian Method for the assignment problem.
*Naval Research Logistics Quarterly*, 2:83-97, 1955.
3. Harold W. Kuhn. Variants of the Hungarian method for assignment
problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.
4. Munkres, J. Algorithms for the Assignment and Transportation Problems.
*Journal of the Society of Industrial and Applied Mathematics*,
5(1):32-38, March, 1957.
5. http://en.wikipedia.org/wiki/Hungarian_algorithm
Copyright and License
=====================
This software is released under a BSD license, adapted from
<http://opensource.org/licenses/bsd-license.php>
Copyright (c) 2008 Brian M. Clapper
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name "clapper.org" nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
__docformat__ = 'restructuredtext'
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import sys
import copy
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['Munkres', 'make_cost_matrix']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# Info about the module
__version__ = "1.0.6"
__author__ = "Brian Clapper, bmc@clapper.org"
__url__ = "http://software.clapper.org/munkres/"
__copyright__ = "(c) 2008 Brian M. Clapper"
__license__ = "BSD-style license"
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class Munkres:
    """
    Calculate the Munkres solution to the classical assignment problem.
    See the module documentation for usage.

    Internally, ``marked[i][j]`` is 0 (unmarked), 1 (starred zero, i.e. an
    assignment) or 2 (primed zero, a candidate found in step 4).
    """

    def __init__(self):
        """Create a new instance"""
        self.C = None             # padded, working copy of the cost matrix
        self.row_covered = []
        self.col_covered = []
        self.n = 0                # dimension of the padded square matrix
        self.Z0_r = 0             # row of the uncovered primed zero (step 4)
        self.Z0_c = 0             # column of the uncovered primed zero
        self.marked = None
        self.path = None          # alternating star/prime path (step 5)

    def make_cost_matrix(profit_matrix, inversion_function):
        """
        **DEPRECATED**

        Please use the module function ``make_cost_matrix()``.
        """
        import munkres
        return munkres.make_cost_matrix(profit_matrix, inversion_function)

    make_cost_matrix = staticmethod(make_cost_matrix)

    def pad_matrix(self, matrix, pad_value=0):
        """
        Pad a possibly non-square matrix to make it square.

        :Parameters:
            matrix : list of lists
                matrix to pad

            pad_value : int
                value to use to pad the matrix

        :rtype: list of lists
        :return: a new, possibly padded, matrix
        """
        max_columns = 0
        total_rows = len(matrix)
        for row in matrix:
            max_columns = max(max_columns, len(row))
        total_rows = max(max_columns, total_rows)
        new_matrix = []
        for row in matrix:
            row_len = len(row)
            new_row = row[:]
            if total_rows > row_len:
                # Row too short. Pad it.
                # (Bug fix: honor pad_value; a literal 0 was used before,
                # silently ignoring the parameter.)
                new_row += [pad_value] * (total_rows - row_len)
            new_matrix += [new_row]
        while len(new_matrix) < total_rows:
            # (Bug fix: pad the extra rows with pad_value, too.)
            new_matrix += [[pad_value] * total_rows]
        return new_matrix

    def compute(self, cost_matrix):
        """
        Compute the indexes for the lowest-cost pairings between rows and
        columns in the database. Returns a list of (row, column) tuples
        that can be used to traverse the matrix.

        :Parameters:
            cost_matrix : list of lists
                The cost matrix. If this cost matrix is not square, it
                will be padded with zeros, via a call to ``pad_matrix()``.
                (This method does *not* modify the caller's matrix. It
                operates on a copy of the matrix.)

                **WARNING**: This code handles square and rectangular
                matrices. It does *not* handle irregular matrices.

        :rtype: list
        :return: A list of ``(row, column)`` tuples that describe the lowest
                 cost path through the matrix
        """
        self.C = self.pad_matrix(cost_matrix)
        self.n = len(self.C)
        self.original_length = len(cost_matrix)
        self.original_width = len(cost_matrix[0])
        self.row_covered = [False for i in range(self.n)]
        self.col_covered = [False for i in range(self.n)]
        self.Z0_r = 0
        self.Z0_c = 0
        self.path = self.__make_matrix(self.n * 2, 0)
        self.marked = self.__make_matrix(self.n, 0)

        done = False
        step = 1

        # Classic Munkres state machine. Step 7 ("done") is deliberately
        # absent from the table: looking it up raises KeyError, ending the loop.
        steps = { 1 : self.__step1,
                  2 : self.__step2,
                  3 : self.__step3,
                  4 : self.__step4,
                  5 : self.__step5,
                  6 : self.__step6 }

        while not done:
            try:
                func = steps[step]
                step = func()
            except KeyError:
                done = True

        # Look for the starred columns within the original (unpadded) bounds.
        results = []
        for i in range(self.original_length):
            for j in range(self.original_width):
                if self.marked[i][j] == 1:
                    results += [(i, j)]

        return results

    def __copy_matrix(self, matrix):
        """Return an exact copy of the supplied matrix"""
        return copy.deepcopy(matrix)

    def __make_matrix(self, n, val):
        """Create an *n*x*n* matrix, populating it with the specific value."""
        matrix = []
        for i in range(n):
            matrix += [[val for j in range(n)]]
        return matrix

    def __step1(self):
        """
        For each row of the matrix, find the smallest element and
        subtract it from every element in its row. Go to Step 2.
        """
        n = self.n
        for i in range(n):
            minval = min(self.C[i])
            # Subtract the row minimum from every element in the row.
            for j in range(n):
                self.C[i][j] -= minval

        return 2

    def __step2(self):
        """
        Find a zero (Z) in the resulting matrix. If there is no starred
        zero in its row or column, star Z. Repeat for each element in the
        matrix. Go to Step 3.
        """
        n = self.n
        for i in range(n):
            for j in range(n):
                if (self.C[i][j] == 0) and \
                   (not self.col_covered[j]) and \
                   (not self.row_covered[i]):
                    self.marked[i][j] = 1
                    self.col_covered[j] = True
                    self.row_covered[i] = True

        self.__clear_covers()
        return 3

    def __step3(self):
        """
        Cover each column containing a starred zero. If K columns are
        covered, the starred zeros describe a complete set of unique
        assignments. In this case, Go to DONE, otherwise, Go to Step 4.
        """
        n = self.n
        count = 0
        for i in range(n):
            for j in range(n):
                if self.marked[i][j] == 1:
                    self.col_covered[j] = True
                    count += 1

        if count >= n:
            step = 7  # done
        else:
            step = 4

        return step

    def __step4(self):
        """
        Find a noncovered zero and prime it. If there is no starred zero
        in the row containing this primed zero, Go to Step 5. Otherwise,
        cover this row and uncover the column containing the starred
        zero. Continue in this manner until there are no uncovered zeros
        left. Save the smallest uncovered value and Go to Step 6.
        """
        step = 0
        done = False
        row = -1
        col = -1
        while not done:
            (row, col) = self.__find_a_zero()
            if row < 0:
                # No uncovered zero remains: adjust the matrix in step 6.
                done = True
                step = 6
            else:
                self.marked[row][col] = 2
                star_col = self.__find_star_in_row(row)
                if star_col >= 0:
                    col = star_col
                    self.row_covered[row] = True
                    self.col_covered[col] = False
                else:
                    # Uncovered primed zero with no star in its row: the
                    # augmenting-path start for step 5.
                    done = True
                    self.Z0_r = row
                    self.Z0_c = col
                    step = 5

        return step

    def __step5(self):
        """
        Construct a series of alternating primed and starred zeros as
        follows. Let Z0 represent the uncovered primed zero found in Step 4.
        Let Z1 denote the starred zero in the column of Z0 (if any).
        Let Z2 denote the primed zero in the row of Z1 (there will always
        be one). Continue until the series terminates at a primed zero
        that has no starred zero in its column. Unstar each starred zero
        of the series, star each primed zero of the series, erase all
        primes and uncover every line in the matrix. Return to Step 3
        """
        count = 0
        path = self.path
        path[count][0] = self.Z0_r
        path[count][1] = self.Z0_c
        done = False
        while not done:
            row = self.__find_star_in_col(path[count][1])
            if row >= 0:
                count += 1
                path[count][0] = row
                path[count][1] = path[count-1][1]
            else:
                done = True

            if not done:
                col = self.__find_prime_in_row(path[count][0])
                count += 1
                path[count][0] = path[count-1][0]
                path[count][1] = col

        self.__convert_path(path, count)
        self.__clear_covers()
        self.__erase_primes()
        return 3

    def __step6(self):
        """
        Add the value found in Step 4 to every element of each covered
        row, and subtract it from every element of each uncovered column.
        Return to Step 4 without altering any stars, primes, or covered
        lines.
        """
        minval = self.__find_smallest()
        for i in range(self.n):
            for j in range(self.n):
                if self.row_covered[i]:
                    self.C[i][j] += minval
                if not self.col_covered[j]:
                    self.C[i][j] -= minval
        return 4

    def __find_smallest(self):
        """Find the smallest uncovered value in the matrix."""
        minval = sys.maxsize
        for i in range(self.n):
            for j in range(self.n):
                if (not self.row_covered[i]) and (not self.col_covered[j]):
                    if minval > self.C[i][j]:
                        minval = self.C[i][j]
        return minval

    def __find_a_zero(self):
        """Find the first uncovered element with value 0.

        Returns ``(row, col)``, or ``(-1, -1)`` if none exists.
        """
        row = -1
        col = -1
        i = 0
        n = self.n
        done = False

        while not done:
            j = 0
            while True:
                if (self.C[i][j] == 0) and \
                   (not self.row_covered[i]) and \
                   (not self.col_covered[j]):
                    row = i
                    col = j
                    done = True
                j += 1
                if j >= n:
                    break
            i += 1
            if i >= n:
                done = True

        return (row, col)

    def __find_star_in_row(self, row):
        """
        Find the first starred element in the specified row. Returns
        the column index, or -1 if no starred element was found.
        """
        col = -1
        for j in range(self.n):
            if self.marked[row][j] == 1:
                col = j
                break

        return col

    def __find_star_in_col(self, col):
        """
        Find the first starred element in the specified column. Returns
        the row index, or -1 if no starred element was found.
        """
        row = -1
        for i in range(self.n):
            if self.marked[i][col] == 1:
                row = i
                break

        return row

    def __find_prime_in_row(self, row):
        """
        Find the first prime element in the specified row. Returns
        the column index, or -1 if no primed element was found.
        """
        col = -1
        for j in range(self.n):
            if self.marked[row][j] == 2:
                col = j
                break

        return col

    def __convert_path(self, path, count):
        """Flip stars along the augmenting path: starred -> unmarked,
        primed -> starred."""
        for i in range(count+1):
            if self.marked[path[i][0]][path[i][1]] == 1:
                self.marked[path[i][0]][path[i][1]] = 0
            else:
                self.marked[path[i][0]][path[i][1]] = 1

    def __clear_covers(self):
        """Clear all covered matrix cells"""
        for i in range(self.n):
            self.row_covered[i] = False
            self.col_covered[i] = False

    def __erase_primes(self):
        """Erase all prime markings"""
        for i in range(self.n):
            for j in range(self.n):
                if self.marked[i][j] == 2:
                    self.marked[i][j] = 0
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def make_cost_matrix(profit_matrix, inversion_function):
    """
    Create a cost matrix from a profit matrix by calling
    'inversion_function' to invert each value. The inversion
    function must take one numeric argument (of any type) and return
    another numeric argument which is presumed to be the cost inverse
    of the original profit.

    This is a static method. Call it like this:

    .. python::

        cost_matrix = Munkres.make_cost_matrix(matrix, inversion_func)

    For example:

    .. python::

        cost_matrix = Munkres.make_cost_matrix(matrix, lambda x : sys.maxsize - x)

    :Parameters:
        profit_matrix : list of lists
            The matrix to convert from a profit to a cost matrix

        inversion_function : function
            The function to use to invert each entry in the profit matrix

    :rtype: list of lists
    :return: The converted matrix
    """
    # Invert every entry, building the whole cost matrix in one expression.
    return [[inversion_function(entry) for entry in profit_row]
            for profit_row in profit_matrix]
def print_matrix(matrix, msg=None):
    """
    Convenience function: Displays the contents of a matrix of integers.

    :Parameters:
        matrix : list of lists
            Matrix to print

        msg : str
            Optional message to print before displaying the matrix
    """
    if msg is not None:
        print(msg)

    # Calculate the appropriate format width.
    # (Bug fix: int(math.log10(val)) + 1 raised ValueError for 0 and gave
    # wrong widths for negative values; len(str(val)) handles all integers.)
    width = 0
    for row in matrix:
        for val in row:
            width = max(width, len(str(val)))

    # Make the format string
    format = '%%%dd' % width

    # Print the matrix
    for row in matrix:
        sep = '['
        for val in row:
            sys.stdout.write(sep + format % val)
            sep = ', '
        sys.stdout.write(']\n')
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # Self-test: (cost matrix, expected optimal total) pairs covering both
    # square and rectangular inputs.
    matrices = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850),  # expected cost

        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452),  # expected cost

        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18),

        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15)]

    solver = Munkres()
    for matrix, expected in matrices:
        print_matrix(matrix, msg='cost matrix')
        total = 0
        for row, col in solver.compute(matrix):
            cell = matrix[row][col]
            total += cell
            print('(%d, %d) -> %d' % (row, col, cell))
        print('lowest cost=%d' % total)
        assert expected == total
|
maxkoryukov/headphones
|
lib/munkres.py
|
Python
|
gpl-3.0
| 24,729
|
[
"Brian"
] |
06cd5bd01f6f9bf3f49b13c01e19b1d5974b83ad4900a8a3931028ec2eecc9da
|
import meshplex
import numpy as np
import pyamg
from numpy import pi
from sympy import sin
import pyfvm
from pyfvm.form_language import Subdomain, dS, dV, integrate, n_dot_grad
def test():
    """Solve a 3D Poisson problem with Dirichlet boundary conditions on the
    unit cube using pyfvm's finite-volume discretization, then write the
    solution to out.vtk."""
    # Boundary subdomains splitting the cube surface at y = 0.5.
    class Gamma0(Subdomain):
        def is_inside(self, x):
            return x[1] < 0.5

        is_boundary_only = True

    class Gamma1(Subdomain):
        def is_inside(self, x):
            return x[1] >= 0.5

        is_boundary_only = True

    # Weak form: -div(grad u) = 50 sin(2 pi x), expressed with pyfvm's
    # symbolic form language (surface integral dS, volume integral dV).
    class Poisson:
        def apply(self, u):
            return integrate(lambda x: -n_dot_grad(u(x)), dS) - integrate(
                lambda x: 50 * sin(2 * pi * x[0]), dV
            )

        def dirichlet(self, u):
            # u = 0 on Gamma0 (y < 0.5), u = 1 on Gamma1 (y >= 0.5).
            return [(lambda x: u(x) - 0.0, Gamma0()), (lambda x: u(x) - 1.0, Gamma1())]

    # # Read the mesh from file
    # mesh, _, _ = pyfvm.reader.read('circle.vtu')

    # Create mesh using meshzoo: a 30x30x30 tetrahedral grid of the unit cube.
    import meshzoo

    vertices, cells = meshzoo.cube_tetra(
        np.linspace(0.0, 1.0, 30), np.linspace(0.0, 1.0, 30), np.linspace(0.0, 1.0, 30)
    )
    mesh = meshplex.Mesh(vertices, cells)

    # vertices, cells = meshzoo.rectangle(0.0, 2.0, 0.0, 1.0, 401, 201)
    # mesh = meshplex.Mesh(vertices, cells)

    # import mshr
    # import dolfin
    # # h = 2.5e-3
    # h = 1.e-1
    # # cell_size = 2 * pi / num_boundary_points
    # c = mshr.Circle(dolfin.Point(0., 0., 0.), 1, int(2*pi / h))
    # # cell_size = 2 * bounding_box_radius / res
    # m = mshr.generate_mesh(c, 2.0 / h)
    # coords = m.coordinates()
    # coords = np.c_[coords, np.zeros(len(coords))]
    # cells = m.cells().copy()
    # mesh = meshplex.Mesh(coords, cells)
    # # mesh = meshplex.lloyd_smoothing(mesh, 1.0e-4)

    # Assemble the linear system and solve it with algebraic multigrid.
    matrix, rhs = pyfvm.discretize_linear(Poisson(), mesh)

    # ml = pyamg.smoothed_aggregation_solver(matrix)
    ml = pyamg.ruge_stuben_solver(matrix)
    u = ml.solve(rhs, tol=1e-10)
    # from scipy.sparse import linalg
    # u = linalg.spsolve(matrix, rhs)

    mesh.write("out.vtk", point_data={"u": u})
    return
# Allow running this test directly as a script.
if __name__ == "__main__":
    test()
|
nschloe/pyfvm
|
examples/poisson_example_test.py
|
Python
|
gpl-3.0
| 2,039
|
[
"VTK"
] |
ae0ebe91537093d1376af5c21bc5e15a632a37c453722d6c52df9acad842f6db
|
#!/usr/bin/env python
"""
This is a Nipype pipeline for combining brain surface atlas labels.
It will combine the surface labels in .annot format,
and convert the labels to VTK format.
https://mail.nmr.mgh.harvard.edu/pipermail//freesurfer/2010-June/014620.html
`mris_translate_annotation <subject> <hemi> <in annot> <translation file> <out annot>`
``translation file``: text file that lists the labels (one per line)
you want to group, and the new label you want to create. You have to use
the RGB codes; each line will provide the input and output RGB values::
221 220 60 223 220 60
221 220 160 223 220 60
221 220 100 223 220 60
Authors:
- Arno Klein (arno@mindboggle.info) http://binarybottle.com
Copyright 2012, Mindboggle team (http://mindboggle.info), Apache v2.0 License
"""
import os
#=============================================================================
# Setup: import libraries, set file paths, and initialize main workflow
#=============================================================================
#-----------------------------------------------------------------------------
# Steps to run (flags selecting which pipeline stages are built below)
#-----------------------------------------------------------------------------
do_load_vtk_surfaces = False
do_combine_atlas_labels = 1
do_convert_atlas_annot = 1
do_convert_original_atlas_annot = 1
copy_to_output = 1
#-----------------------------------------------------------------------------
# From settings.py
# NOTE(review): site-specific hard-coded paths; adjust for your installation.
#-----------------------------------------------------------------------------
output_path = '/projects/Mindboggle/output' # Where to save processing output
subjects_path = os.environ['SUBJECTS_DIR'] # FreeSurfer subjects directory
copy_path = subjects_path
base_path = '/projects/Mindboggle/mindboggle' # Mindboggle home directory
info_path = '/projects/Mindboggle/mindboggle/mindboggle/info' # info directory
temp_path = os.path.join(output_path, 'workspace') # Where to save temp files
label_string_old = 'labels.DKT31.manual'  # original 31-label protocol
label_string = 'labels.DKT25.manual'      # combined 25-label protocol
relabel_file = os.path.join(info_path, 'labels.surface.DKT31to25.txt')
hemis = ['lh','rh']
#-----------------------------------------------------------------------------
# Subjects to process
#-----------------------------------------------------------------------------
from mindboggle.utils.io_table import read_columns
atlas_list_file = os.path.join(info_path, 'atlases101.txt')
subjects = read_columns(atlas_list_file, 1)[0]
# NOTE(review): the next line overrides the full atlas list with a single
# test subject; remove it to process every subject read above.
subjects = ['OASIS-TRT-20-11']
#-----------------------------------------------------------------------------
# Import system and nipype Python libraries
#-----------------------------------------------------------------------------
from nipype.pipeline.engine import Workflow, Node
from nipype.interfaces.utility import Function as Fn
from nipype.interfaces.utility import IdentityInterface
from nipype.interfaces.io import DataGrabber, DataSink
#-----------------------------------------------------------------------------
# Import Mindboggle Python libraries
# NOTE(review): the converter nodes further down must reference these
# imported names (annot_to_vtk, surface_to_vtk).
#-----------------------------------------------------------------------------
from mindboggle.utils.io_vtk import annot_to_vtk, surface_to_vtk
from mindboggle.label.relabel import relabel_annot_file
#-----------------------------------------------------------------------------
# Initialize main workflow
#-----------------------------------------------------------------------------
flow = Workflow(name='Atlas_relabeling_workflow')
flow.base_dir = temp_path  # intermediate node outputs live under workspace
if not os.path.isdir(temp_path): os.makedirs(temp_path)
#=============================================================================
# Inputs and outputs
#=============================================================================
#-----------------------------------------------------------------------------
# Iterate inputs over subjects, hemispheres
# (surfaces are assumed to take the form: lh.pial or lh.pial.vtk)
#-----------------------------------------------------------------------------
info = Node(name = 'Inputs',
            interface = IdentityInterface(fields=['subject', 'hemi']))
# One workflow run per (subject, hemisphere) combination.
info.iterables = ([('subject', subjects), ('hemi', hemis)])
#-----------------------------------------------------------------------------
# Location and structure of the surface inputs
#-----------------------------------------------------------------------------
surf = Node(name = 'Surfaces',
            interface = DataGrabber(infields=['subject', 'hemi'],
                                    outfields=['surface_files']))
surf.inputs.base_directory = subjects_path
# Template resolves to <subjects_path>/<subject>/surf/<hemi>.pial
surf.inputs.template = os.path.join('%s', 'surf', '%s.%s')
surf.inputs.template_args['surface_files'] = [['subject', 'hemi', 'pial']]
flow.connect([(info, surf, [('subject','subject'), ('hemi','hemi')])])
#-----------------------------------------------------------------------------
# Outputs
#-----------------------------------------------------------------------------
datasink = Node(DataSink(), name = 'Results')
datasink.inputs.base_directory = output_path
datasink.inputs.container = 'results'
if not os.path.isdir(output_path): os.makedirs(output_path)
#-------------------------------------------------------------------------------
# Convert surfaces to VTK
#-------------------------------------------------------------------------------
if not do_load_vtk_surfaces:
    # BUGFIX: the node previously referenced an undefined name
    # `freesurface_to_vtk`; the function imported from
    # mindboggle.utils.io_vtk is `surface_to_vtk` (raises NameError when
    # the node runs otherwise).
    convertsurf = Node(name = 'Surf_to_VTK',
                       interface = Fn(function = surface_to_vtk,
                                      input_names = ['surface_file'],
                                      output_names = ['vtk_file']))
    flow.connect([(surf, convertsurf, [('surface_files','surface_file')])])
#=============================================================================
# Combine .annot labels and convert to VTK
#=============================================================================
atlasflow = Workflow(name='Atlas_workflow')
atlasflow.base_dir = temp_path
#-------------------------------------------------------------------------
# Combine atlas .annot labels (DKT31 -> DKT25 via the relabel table)
#-------------------------------------------------------------------------
if do_combine_atlas_labels:
    combine_labels = Node(name='Combine_atlas_labels',
                          interface = Fn(function = relabel_annot_file,
                                         input_names = ['hemi',
                                                        'subject',
                                                        'annot_name',
                                                        'new_annot_name',
                                                        'relabel_file'],
                                         output_names = ['new_annot_name']))
    atlasflow.add_nodes([combine_labels])
    combine_labels.inputs.annot_name = label_string_old
    combine_labels.inputs.new_annot_name = label_string
    combine_labels.inputs.relabel_file = relabel_file
    # hemi/subject come from the iterating Inputs node of the outer workflow.
    flow.connect([(info, atlasflow, [('hemi','Combine_atlas_labels.hemi'),
                                     ('subject','Combine_atlas_labels.subject')])])
#-----------------------------------------------------------------------------
# Convert .annot labels to VTK format
#-----------------------------------------------------------------------------
if do_convert_atlas_annot:
    # BUGFIX: `freeannot_to_vtk` was undefined; the imported converter is
    # `annot_to_vtk` (from mindboggle.utils.io_vtk).
    # TODO(review): confirm annot_to_vtk's parameter names match these.
    atlas_vtk = Node(name = 'Convert_atlas_labels',
                     interface = Fn(function = annot_to_vtk,
                                    input_names = ['surface_file',
                                                   'hemi',
                                                   'subject',
                                                   'subjects_path',
                                                   'annot_name'],
                                    output_names = ['vtk_file']))
    atlasflow.add_nodes([atlas_vtk])
    flow.connect([(info, atlasflow,
                   [('hemi','Convert_atlas_labels.hemi'),
                    ('subject','Convert_atlas_labels.subject')])])
    atlas_vtk.inputs.subjects_path = subjects_path
    atlasflow.connect([(combine_labels, atlas_vtk,
                        [('new_annot_name','annot_name')])])
    if do_load_vtk_surfaces:
        # BUGFIX: Workflow.connect takes node objects, not strings; the
        # original passed ('surf', 'Convert_atlas_labels.atlas_vtk') which
        # would fail when this branch is enabled.
        flow.connect([(surf, atlasflow,
                       [('surface_files','Convert_atlas_labels.surface_file')])])
    else:
        flow.connect([(convertsurf, atlasflow,
                       [('vtk_file','Convert_atlas_labels.surface_file')])])
    # flow.connect([(atlasflow, datasink,
    #               [('Convert_atlas_labels.vtk_file','atlas_labels')])])
if do_convert_original_atlas_annot:
    # BUGFIX: `freeannot_to_vtk` was undefined; use the imported
    # `annot_to_vtk` converter (see do_convert_atlas_annot above for the
    # parallel node).
    orig_atlas_vtk = Node(name = 'Convert_original_atlas_labels',
                          interface = Fn(function = annot_to_vtk,
                                         input_names = ['surface_file',
                                                        'hemi',
                                                        'subject',
                                                        'subjects_path',
                                                        'annot_name'],
                                         output_names = ['vtk_file']))
    atlasflow.add_nodes([orig_atlas_vtk])
    flow.connect([(info, atlasflow,
                   [('hemi','Convert_original_atlas_labels.hemi'),
                    ('subject','Convert_original_atlas_labels.subject')])])
    orig_atlas_vtk.inputs.subjects_path = subjects_path
    orig_atlas_vtk.inputs.annot_name = label_string_old
    if do_load_vtk_surfaces:
        # BUGFIX: connect node objects, not strings (the original passed
        # ('surf', 'Convert_original_atlas_labels.atlas_vtk')).
        flow.connect([(surf, atlasflow,
                       [('surface_files','Convert_original_atlas_labels.surface_file')])])
    else:
        flow.connect([(convertsurf, atlasflow,
                       [('vtk_file','Convert_original_atlas_labels.surface_file')])])
    # flow.connect([(atlasflow, datasink,
    #               [('Convert_original_atlas_labels.vtk_file','atlas_labels')])])
##############################################################################
if __name__ == '__main__':
    flow.run()
    #-------------------------------------------------------------------------
    # Copy results to atlas label directories
    #-------------------------------------------------------------------------
    if copy_to_output:
        import shutil

        def _copy_result(s, h, node_name, label_str):
            """Copy one converted VTK label file for subject *s*, hemisphere
            *h* from the workflow workspace into the atlas label directory."""
            src = os.path.join(temp_path,
                               'Atlas_relabeling_workflow',
                               'Atlas_workflow',
                               '_hemi_' + h + '_subject_' + s,
                               node_name,
                               h + '.pial.' + label_str + '.vtk')
            tgt = os.path.join(copy_path, s, 'label',
                               h + '.' + label_str + '.vtk')
            print('cp %s %s' % (src, tgt))
            try:
                # shutil.copy replaces the previous `os.system('cp ...')`
                # call: no shell involved, and paths with spaces are safe.
                shutil.copy(src, tgt)
            except (IOError, OSError) as exc:
                # Stay best-effort like the original shell call: report the
                # failure and keep copying the remaining files.
                print('copy failed: %s' % exc)

        for s in subjects:
            for h in hemis:
                if do_convert_atlas_annot:
                    _copy_result(s, h, 'Convert_atlas_labels', label_string)
                if do_convert_original_atlas_annot:
                    _copy_result(s, h, 'Convert_original_atlas_labels',
                                 label_string_old)
|
binarybottle/mindboggle_sidelined
|
relabel_atlas_pipeline.py
|
Python
|
apache-2.0
| 11,697
|
[
"VTK"
] |
4c304941d487c58af036a54b1a74867ee7a863d88a71b921e2ddd24ce9c4faa3
|
# flake8: noqa
import copy
import math
def distance(p0, p1):
    """Return the Euclidean distance between 2D points *p0* and *p1*.

    Each point is an indexable pair (x, y).
    """
    # math.hypot is clearer and numerically safer than
    # sqrt(pow(dx, 2) + pow(dy, 2)): it avoids overflow/underflow in the
    # intermediate squared terms.
    return math.hypot(p0[0] - p1[0], p0[1] - p1[1])
##############################################################################
# Diffing code from:
# http://stackoverflow.com/a/6333972/784831
def dict_diff(merge, lhs, rhs):
    """Generic dictionary difference.

    Keys present on only one side are copied through unchanged; keys
    present on both sides with unequal values are resolved by calling
    ``merge(lhs_value, rhs_value)``.  Equal entries are omitted.
    """
    result = {}
    for key, left_value in lhs.items():
        if key not in rhs:
            # auto-merge for missing key on right-hand-side.
            result[key] = left_value
        elif left_value != rhs[key]:
            # on collision, invoke the custom merge function.
            result[key] = merge(left_value, rhs[key])
    for key, right_value in rhs.items():
        if key not in lhs:
            # auto-merge for missing key on left-hand-side.
            result[key] = right_value
    return result
def keep_diff(lhs, rhs):
    """Merge dictionaries, keeping the right-hand-side value on conflict."""
    def prefer_right(_left, right):
        return right
    return dict_diff(prefer_right, lhs, rhs)
def push(x, k, v):
    """Return a shallow copy of dict `x` with key `k` set to `v`.

    The input dictionary is left unmodified.
    """
    result = copy.copy(x)
    result[k] = v
    return result
def pop(x, k):
    """Return a shallow copy of dict `x` without key `k`.

    The input dictionary is left unmodified; a missing key raises KeyError,
    just like ``del``.
    """
    result = copy.copy(x)
    del result[k]
    return result
def diff(lhs, rhs, k):
    """Diff two lists of dicts, treating field `k` as the primary key.

    Returns a list of dicts (in the same shape as the inputs) containing
    only entries that differ; conflicts keep the right-hand-side values.
    """
    # Transform each list of dicts into a dict-of-dicts keyed by field k
    # (with k removed from the payload) so they can be diffed per key.
    lhs_by_key = dict((record[k], pop(record, k)) for record in lhs)
    rhs_by_key = dict((record[k], pop(record, k)) for record in rhs)
    # Diff at the first level, preferring right-hand values on conflict.
    changed = dict_diff(keep_diff, lhs_by_key, rhs_by_key)
    # Transform back to the original list-of-dicts format.
    return [push(payload, k, key) for (key, payload) in changed.items()]
##############################################################################
"""
Author: Aaron MacDonald
Date: June 14, 2007
Description: An implementation of the precise permissive field
of view algorithm for use in tile-based games.
Based on the algorithm presented at
http://roguebasin.roguelikedevelopment.org/
index.php?title=
Precise_Permissive_Field_of_View.
You are free to use or modify this code as long as this notice is
included.
This code is released without warranty.
"""
def fieldOfView(startX, startY, mapWidth, mapHeight, radius,
                funcVisitTile, funcTileBlocked):
    """
    Determines which coordinates on a 2D grid are visible from a
    particular coordinate.
    startX, startY:         The (x, y) coordinate on the grid that
                            is the centre of view.
    mapWidth, mapHeight:    The maximum extents of the grid.  The
                            minimum extents are assumed to be both
                            zero.
    radius:                 How far the field of view may extend
                            in either direction along the x and y
                            axis.
    funcVisitTile:          User function that takes two integers
                            representing an (x, y) coordinate.  Is
                            used to "visit" visible coordinates.
    funcTileBlocked:        User function that takes two integers
                            representing an (x, y) coordinate.
                            Returns True if the coordinate blocks
                            sight to coordinates "behind" it.
    """
    # Keep track of what tiles have been visited so that no tile will be visited twice.
    visited = set()
    # Will always see the centre.
    funcVisitTile(startX, startY)
    visited.add((startX, startY))
    # Get the dimensions of the actual field of view, making
    # sure not to go off the map or beyond the radius.
    # (min() replaces the original four if/else clamp blocks - identical
    # behavior, less code.)
    minExtentX = min(startX, radius)
    maxExtentX = min(mapWidth - startX - 1, radius)
    minExtentY = min(startY, radius)
    maxExtentY = min(mapHeight - startY - 1, radius)
    # Northeast quadrant
    __checkQuadrant(visited, startX, startY, 1, 1,
                    maxExtentX, maxExtentY,
                    funcVisitTile, funcTileBlocked)
    # Southeast quadrant
    __checkQuadrant(visited, startX, startY, 1, -1,
                    maxExtentX, minExtentY,
                    funcVisitTile, funcTileBlocked)
    # Southwest quadrant
    __checkQuadrant(visited, startX, startY, -1, -1,
                    minExtentX, minExtentY,
                    funcVisitTile, funcTileBlocked)
    # Northwest quadrant
    __checkQuadrant(visited, startX, startY, -1, 1,
                    minExtentX, maxExtentY,
                    funcVisitTile, funcTileBlocked)
# -------------------------------------------------------------
class __Line(object):
    """A directed line segment from (xi, yi) to (xf, yf).

    The p* predicates classify a point's position relative to the line
    using the sign of a 2D cross product (see relativeSlope).
    """

    def __init__(self, xi, yi, xf, yf):
        self.xi = xi
        self.yi = yi
        self.xf = xf
        self.yf = yf

    @property
    def dx(self):
        return self.xf - self.xi

    @property
    def dy(self):
        return self.yf - self.yi

    def pBelow(self, x, y):
        return self.relativeSlope(x, y) > 0

    def pBelowOrCollinear(self, x, y):
        return self.relativeSlope(x, y) >= 0

    def pAbove(self, x, y):
        return self.relativeSlope(x, y) < 0

    def pAboveOrCollinear(self, x, y):
        return self.relativeSlope(x, y) <= 0

    def pCollinear(self, x, y):
        return self.relativeSlope(x, y) == 0

    def lineCollinear(self, line):
        # A whole line is collinear with this one iff both its endpoints are.
        return (self.pCollinear(line.xi, line.yi)
                and self.pCollinear(line.xf, line.yf))

    def relativeSlope(self, x, y):
        # Cross product of the direction vector (dx, dy) with the vector
        # from (x, y) to the endpoint; its sign gives the point's side.
        return (self.dy * (self.xf - x)) \
            - (self.dx * (self.yf - y))
class __ViewBump:
    # A point at which one of a view's bounding lines was adjusted
    # ("bumped") by a blocking tile; `parent` links to the previous bump,
    # forming a singly linked history used when re-anchoring lines.
    def __init__(self, x, y, parent):
        self.x = x
        self.y = y
        self.parent = parent
class __View:
    # A wedge of potentially visible space bounded below by `shallowLine`
    # and above by `steepLine`; the bump chains record how each line has
    # been adjusted by blocking tiles so far.
    def __init__(self, shallowLine, steepLine):
        self.shallowLine = shallowLine
        self.steepLine = steepLine
        self.shallowBump = None
        self.steepBump = None
def __checkQuadrant(visited, startX, startY, dx, dy,
                    extentX, extentY, funcVisitTile, funcTileBlocked):
    """Visit all visible tiles in one quadrant.

    dx, dy (each +1 or -1) select the quadrant; extentX/extentY bound how
    far the scan reaches.  Tiles are swept along diagonals going outward
    from the origin, maintaining a list of still-open views.
    """
    activeViews = []
    shallowLine = __Line(0, 1, extentX, 0)
    steepLine = __Line(1, 0, 0, extentY)
    activeViews.append(__View(shallowLine, steepLine))
    viewIndex = 0
    # Visit the tiles diagonally and going outwards
    #
    # .
    # .
    # .           .
    # 9        .
    # 5  8  .
    # 2  4  7
    # @  1  3  6  .  .  .
    maxI = extentX + extentY
    i = 1
    while i != maxI + 1 and len(activeViews) > 0:
        # max()/min() replace the original if/else clamp blocks - same
        # values, less code.
        startJ = max(0, i - extentX)
        maxJ = min(i, extentY)
        j = startJ
        while j != maxJ + 1 and viewIndex < len(activeViews):
            x = i - j
            y = j
            __visitCoord(visited, startX, startY, x, y, dx, dy,
                         viewIndex, activeViews,
                         funcVisitTile, funcTileBlocked)
            j += 1
        i += 1
def __visitCoord(visited, startX, startY, x, y, dx, dy, viewIndex,
                 activeViews, funcVisitTile, funcTileBlocked):
    """Visit one quadrant-local coordinate (x, y) and update the views.

    If the tile blocks sight, the intersecting view is removed, narrowed
    (shallow line raised / steep line lowered), or split in two.
    """
    # The top left and bottom right corners of the current coordinate.
    topLeft = (x, y + 1)
    bottomRight = (x + 1, y)
    while viewIndex < len(activeViews) and activeViews[viewIndex].steepLine.pBelowOrCollinear(
            bottomRight[0], bottomRight[1]):
        # The current coordinate is above the current view and is
        # ignored.  The steeper fields may need it though.
        viewIndex += 1
    if viewIndex == len(activeViews) or activeViews[viewIndex].shallowLine.pAboveOrCollinear(
            topLeft[0], topLeft[1]):
        # Either the current coordinate is above all of the fields
        # or it is below all of the fields.
        return
    # It is now known that the current coordinate is between the steep
    # and shallow lines of the current view.
    isBlocked = False
    # The real quadrant coordinates
    realX = x * dx
    realY = y * dy
    if (startX + realX, startY + realY) not in visited:
        visited.add((startX + realX, startY + realY))
        funcVisitTile(startX + realX, startY + realY)
    """else:
        # Debugging
        print (startX + realX, startY + realY)"""
    isBlocked = funcTileBlocked(startX + realX, startY + realY)
    if not isBlocked:
        # The current coordinate does not block sight and therefore
        # has no effect on the view.
        return
    if activeViews[viewIndex].shallowLine.pAbove(bottomRight[0],
            bottomRight[1]) and activeViews[viewIndex].steepLine.pBelow(
            topLeft[0], topLeft[1]):
        # The current coordinate is intersected by both lines in the
        # current view.  The view is completely blocked.
        del activeViews[viewIndex]
    elif activeViews[viewIndex].shallowLine.pAbove(
            bottomRight[0], bottomRight[1]):
        # The current coordinate is intersected by the shallow line of
        # the current view.  The shallow line needs to be raised.
        __addShallowBump(topLeft[0], topLeft[1],
                         activeViews, viewIndex)
        __checkView(activeViews, viewIndex)
    elif activeViews[viewIndex].steepLine.pBelow(
            topLeft[0], topLeft[1]):
        # The current coordinate is intersected by the steep line of
        # the current view.  The steep line needs to be lowered.
        __addSteepBump(bottomRight[0], bottomRight[1], activeViews,
                       viewIndex)
        __checkView(activeViews, viewIndex)
    else:
        # The current coordinate is completely between the two lines
        # of the current view.  Split the current view into two views
        # above and below the current coordinate.
        shallowViewIndex = viewIndex
        viewIndex += 1
        steepViewIndex = viewIndex
        activeViews.insert(shallowViewIndex,
                           copy.deepcopy(activeViews[shallowViewIndex]))
        __addSteepBump(bottomRight[0], bottomRight[1],
                       activeViews, shallowViewIndex)
        if not __checkView(activeViews, shallowViewIndex):
            # The shallow copy was removed; shift both indices down.
            viewIndex -= 1
            steepViewIndex -= 1
        __addShallowBump(topLeft[0], topLeft[1], activeViews,
                         steepViewIndex)
        __checkView(activeViews, steepViewIndex)
def __addShallowBump(x, y, activeViews, viewIndex):
    """Raise the shallow line of the view at *viewIndex* through (x, y).

    The bump is recorded, then the line's start point is re-anchored
    against every earlier steep bump that now lies above it.
    """
    activeViews[viewIndex].shallowLine.xf = x
    activeViews[viewIndex].shallowLine.yf = y
    # Prepend to the bump history (linked list via `parent`).
    activeViews[viewIndex].shallowBump = __ViewBump(x, y,
            activeViews[viewIndex].shallowBump)
    curBump = activeViews[viewIndex].steepBump
    while curBump is not None:
        if activeViews[viewIndex].shallowLine.pAbove(
                curBump.x, curBump.y):
            activeViews[viewIndex].shallowLine.xi = curBump.x
            activeViews[viewIndex].shallowLine.yi = curBump.y
        curBump = curBump.parent
def __addSteepBump(x, y, activeViews, viewIndex):
    """Lower the steep line of the view at *viewIndex* through (x, y).

    Mirror image of __addShallowBump: record the bump, then re-anchor the
    line's start point against earlier shallow bumps that now lie below it.
    """
    activeViews[viewIndex].steepLine.xf = x
    activeViews[viewIndex].steepLine.yf = y
    # Prepend to the bump history (linked list via `parent`).
    activeViews[viewIndex].steepBump = __ViewBump(x, y,
            activeViews[viewIndex].steepBump)
    curBump = activeViews[viewIndex].shallowBump
    while curBump is not None:
        if activeViews[viewIndex].steepLine.pBelow(
                curBump.x, curBump.y):
            activeViews[viewIndex].steepLine.xi = curBump.x
            activeViews[viewIndex].steepLine.yi = curBump.y
        curBump = curBump.parent
def __checkView(activeViews, viewIndex):
    """
    Removes the view in activeViews at index viewIndex if
        - The two lines are collinear
        - The lines pass through either extremity
    Returns False if the view was removed, True otherwise.
    """
    shallowLine = activeViews[viewIndex].shallowLine
    steepLine = activeViews[viewIndex].steepLine
    if (shallowLine.lineCollinear(steepLine) and (
        shallowLine.pCollinear(0, 1) or shallowLine.pCollinear(1, 0))):
        del activeViews[viewIndex]
        return False
    else:
        return True
|
LikeMyBread/Saylua
|
saylua/modules/adventure/dungeons/maps/meta/helpers.py
|
Python
|
agpl-3.0
| 12,080
|
[
"VisIt"
] |
1e256afbfa93e92c9a00e96ecce6f390d4ebd536420ee2869e506d739b710f4b
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#2012 Bruno Chareyre <bruno.chareyre@hmg.inpg.fr>
"""Example usage of a TesselationWrapper object for getting microscale quantities."""
# See Catalano2013a for the definition of micro-strain
# (http://dx.doi.org/10.1002/nag.2198 or free-access at arxiv http://arxiv.org/pdf/1304.4895.pdf)
# NOTE(review): this script runs inside yade, which injects TriaxialTest,
# TesselationWrapper and the simulation object O into the global namespace;
# it is not a standalone Python script.
tt=TriaxialTest()
tt.generate("test.yade")
O.load("test.yade")
O.run(100,True)
TW=TesselationWrapper()
TW.triangulate()        #compute regular Delaunay triangulation, don't construct tesselation
TW.computeVolumes()     #will silently tesselate the packing, then compute volume of each Voronoi cell
TW.volume(10)           #get volume associated to sphere of id 10
TW.setState(0)          #store current positions internally for later use as the "0" state
O.run(100,True)         #make particles move a little (let's hope they will!)
TW.setState(1)          #store current positions internally in the "1" (deformed) state
#Now we can define strain by comparing states 0 and 1, and average them at the particles scale
TW.defToVtk("strain.vtk")
|
ThomasSweijen/yadesolute2
|
examples/tesselationwrapper/tesselationWrapper.py
|
Python
|
gpl-2.0
| 1,032
|
[
"VTK"
] |
45b766b0bcd175d3dcb8b4da2cda0e24f1a0e51d55b0ced7136910ae92e5d12b
|
from __future__ import division, print_function
import sys
import os.path
import optparse
import os
import tempfile
import time
import traceback
import numpy as np
from ase.io import read
from ase.parallel import world
from ase.utils import devnull
from ase.constraints import FixAtoms, UnitCellFilter
from ase.optimize import LBFGS
from ase.io.trajectory import PickleTrajectory
from ase.utils.eos import EquationOfState
from ase.calculators.calculator import get_calculator, names as calcnames
import ase.db as db
def main():
    """Command-line entry point: parse arguments, run the calculations,
    and exit with the number of failed calculations if any failed."""
    job = Runner()
    job.parse()
    errors = job.errors
    if errors:
        sys.exit(errors)
class Runner:
    """Drive ASE calculations from the command line.

    Parses options, builds Atoms objects, attaches the requested
    calculator, optionally relaxes / fits an equation of state, and stores
    results in an ase.db database.
    """

    def __init__(self):
        self.db = None               # database connection (created lazily)
        self.opts = None             # parsed optparse options
        self.errors = 0              # number of failed calculations
        self.names = []              # expanded list of system names
        self.calculator_name = None

        # Only the master rank writes to stdout.
        if world.rank == 0:
            self.logfile = sys.stdout
        else:
            self.logfile = devnull

    def parse(self, args=None):
        """Parse command-line arguments and run the requested calculations.

        Returns the last Atoms object processed, or None for the
        interactive-session shortcut.
        """
        parser = self.make_parser()
        self.add_options(parser)
        self.opts, names = parser.parse_args(args)

        if args is None and self.opts.interactive_python_session:
            # Re-invoke this command inside an interactive interpreter via
            # a small generated bootstrap script.
            # mode='w+' so writing str works on Python 3 as well
            # (the default binary mode only accepted str on Python 2).
            file = tempfile.NamedTemporaryFile(mode='w+')
            file.write('import os\n')
            file.write('if "PYTHONSTARTUP" in os.environ:\n')
            file.write('    execfile(os.environ["PYTHONSTARTUP"])\n')
            file.write('from ase.cli.run import Runner\n')
            file.write('atoms = Runner().parse(%r)\n' %
                       ([self.calculator_name] + sys.argv[1:]))
            file.flush()
            os.system('python -i %s' % file.name)
            return

        if self.calculator_name is None:
            # First positional argument is the calculator name.
            if names:
                self.calculator_name = names.pop(0)
            else:
                parser.error('Missing calculator name')

        atoms = self.run(names)
        return atoms

    def make_parser(self):
        """Create the option parser (separate so subclasses can extend)."""
        parser = optparse.OptionParser(
            usage='ase-run calculator [options] [system, ...]',
            description="Run calculation with one of ASE's calculators: " +
            ', '.join(calcnames) + '.')
        return parser

    def add_options(self, parser):
        """Register all command-line options on *parser*."""
        add = parser.add_option
        add('-t', '--tag',
            help='String tag added to filenames.')
        add('-p', '--parameters', default='',
            metavar='key=value,...',
            help='Comma-separated key=value pairs of ' +
            'calculator specific parameters.')
        add('-d', '--database',
            help='Use a filename with a ".db" extension for a sqlite3 ' +
            'database or a ".json" extension for a simple json database. ' +
            'Default is no database')
        add('-S', '--skip', action='store_true',
            help='Skip calculations already done.')
        add('--properties', default='efsdMm',
            help='Default value is "efsdMm" meaning calculate energy, ' +
            'forces, stress, dipole moment, total magnetic moment and ' +
            'atomic magnetic moments.')
        add('-f', '--maximum-force', type=float,
            help='Relax internal coordinates.')
        add('--constrain-tags',
            metavar='T1,T2,...',
            help='Constrain atoms with tags T1, T2, ...')
        add('-s', '--maximum-stress', type=float,
            help='Relax unit-cell and internal coordinates.')
        add('-E', '--equation-of-state', help='Equation of state ...')
        add('--eos-type', default='sjeos', help='Selects the type of eos.')
        add('-i', '--interactive-python-session', action='store_true')
        add('-c', '--collection')
        add('--modify', metavar='...',
            help='Modify atoms with Python statement. ' +
            'Example: --modify="atoms.positions[-1,2]+=0.1".')
        add('--after', help='Perform operation after calculation. ' +
            'Example: --after="atoms.calc.write(...)"')

    def log(self, *args, **kwargs):
        """Print on the master rank only (other ranks write to devnull)."""
        print(file=self.logfile, *args, **kwargs)

    def run(self, names):
        """Run a calculation for each name; returns the last Atoms object."""
        opts = self.opts

        if self.db is None:
            # Create database connection:
            self.db = db.connect(opts.database, use_lock_file=True)

        self.expand(names)
        if not names:
            names.insert(0, '-')

        atoms = None
        for name in names:
            if atoms is not None:
                del atoms.calc  # release resources from last calculation
            atoms = self.build(name)
            if opts.modify:
                # BUGFIX/compat: use the exec() call form, which is valid
                # on both Python 2 and 3 (the old `exec code in ns`
                # statement is Python-2-only syntax).
                exec(opts.modify, {'atoms': atoms, 'np': np})
            if name == '-':
                name = atoms.info['key_value_pairs']['name']
            skip = False
            id = None
            if opts.skip:
                # Reserve the name; None means it is already done/claimed.
                id = self.db.reserve(name=name)
                if id is None:
                    skip = True
            if not skip:
                self.set_calculator(atoms, name)
                tstart = time.time()
                try:
                    self.log('Running:', name)
                    data = self.calculate(atoms, name)
                except KeyboardInterrupt:
                    raise
                except Exception:
                    # Record the failure and keep going with the next name.
                    self.log(name, 'FAILED')
                    traceback.print_exc(file=self.logfile)
                    tstop = time.time()
                    data = {'time': tstop - tstart}
                    self.db.write(None, ['failed'], name=name, data=data)
                    self.errors += 1
                else:
                    tstop = time.time()
                    data['time'] = tstop - tstart
                    self.db.write(atoms, name=name, data=data)
                if id:
                    # Drop the reservation row now that the result is in.
                    del self.db[id]
        return atoms

    def calculate(self, atoms, name):
        """Optionally relax / fit an EOS, then compute requested properties."""
        opts = self.opts

        data = {}
        if opts.maximum_force or opts.maximum_stress:
            data = self.optimize(atoms, name)
        if opts.equation_of_state:
            data.update(self.eos(atoms, name))
        data.update(self.calculate_once(atoms, name))

        if opts.after:
            # Same py2/py3-compatible exec() call form as in run().
            exec(opts.after, {'atoms': atoms, 'data': data})

        return data

    def expand(self, names):
        """Expand "s1-s2" ranges into the collection's name list."""
        if not self.names and self.opts.collection:
            con = db.connect(self.opts.collection)
            self.names = [dct.id for dct in con.select()]

        if not names:
            names[:] = self.names
            return

        if not self.names:
            return

        i = 0
        while i < len(names):
            name = names[i]
            if name.count('-') == 1:
                s1, s2 = name.split('-')
                if s1 in self.names and s2 in self.names:
                    j1 = self.names.index(s1)
                    j2 = self.names.index(s2)
                    names[i:i + 1] = self.names[j1:j2 + 1]
                    i += j2 - j1
            i += 1

    def build(self, name):
        """Build an Atoms object from stdin ('-'), the collection, or a file."""
        if name == '-':
            con = db.connect(sys.stdin, 'json')
            return con.get_atoms(add_additional_information=True)
        elif self.opts.collection:
            con = db.connect(self.opts.collection)
            return con.get_atoms(name)
        else:
            return read(name)

    def set_calculator(self, atoms, name):
        """Attach the requested calculator with the user's parameters."""
        cls = get_calculator(self.calculator_name)
        parameters = str2dict(self.opts.parameters)
        if getattr(cls, 'nolabel', False):
            atoms.calc = cls(**parameters)
        else:
            atoms.calc = cls(label=self.get_filename(name), **parameters)

    def calculate_once(self, atoms, name):
        """Trigger calculation of each requested property once."""
        opts = self.opts

        for p in opts.properties or 'efsdMm':
            property, method = {'e': ('energy', 'get_potential_energy'),
                                'f': ('forces', 'get_forces'),
                                's': ('stress', 'get_stress'),
                                'd': ('dipole', 'get_dipole_moment'),
                                'M': ('magmom', 'get_magnetic_moment'),
                                'm': ('magmoms',
                                      'get_magnetic_moments')}[p]
            try:
                getattr(atoms, method)()
            except NotImplementedError:
                # Not every calculator implements every property.
                pass
        data = {}
        return data

    def optimize(self, atoms, name):
        """Relax internal coordinates (and the cell if -s was given)."""
        opts = self.opts
        if opts.constrain_tags:
            tags = [int(t) for t in opts.constrain_tags.split(',')]
            mask = [t in tags for t in atoms.get_tags()]
            atoms.constraints = FixAtoms(mask=mask)

        trajectory = PickleTrajectory(self.get_filename(name, 'traj'), 'w',
                                      atoms)
        if opts.maximum_stress:
            optimizer = LBFGS(UnitCellFilter(atoms), logfile=self.logfile)
            fmax = opts.maximum_stress
        else:
            optimizer = LBFGS(atoms, logfile=self.logfile)
            fmax = opts.maximum_force

        optimizer.attach(trajectory)
        optimizer.run(fmax=fmax)

        data = {}
        if hasattr(optimizer, 'force_calls'):
            data['force_calls'] = optimizer.force_calls
        return data

    def eos(self, atoms, name):
        """Fit an equation of state over +-1% isotropic strain (5 points)."""
        opts = self.opts

        traj = PickleTrajectory(self.get_filename(name, 'traj'), 'w', atoms)

        eps = 0.01
        strains = np.linspace(1 - eps, 1 + eps, 5)
        v1 = atoms.get_volume()
        volumes = strains**3 * v1
        energies = []
        cell1 = atoms.cell
        for s in strains:
            atoms.set_cell(cell1 * s, scale_atoms=True)
            energies.append(atoms.get_potential_energy())
            traj.write(atoms)
        traj.close()
        eos = EquationOfState(volumes, energies, opts.eos_type)
        v0, e0, B = eos.fit()
        # Leave the atoms at the fitted equilibrium volume.
        atoms.set_cell(cell1 * (v0 / v1)**(1 / 3), scale_atoms=True)
        data = {'volumes': volumes,
                'energies': energies,
                'fitted_energy': e0,
                'fitted_volume': v0,
                'bulk_modulus': B,
                'eos_type': opts.eos_type}
        return data

    def get_filename(self, name=None, ext=None):
        """Build an output filename from *name*, the tag, and *ext*."""
        if name is None:
            if self.opts.tag is None:
                filename = 'ase'
            else:
                filename = self.opts.tag
        else:
            if '.' in name:
                name = name.rsplit('.', 1)[0]
            if self.opts.tag is None:
                filename = name
            else:
                filename = name + '-' + self.opts.tag
        if ext:
            filename += '.' + ext
        return filename
def str2dict(s, namespace=None, sep='='):
    """Convert comma-separated key=value string to dictionary.

    Examples:
    >>> str2dict('xc=PBE,nbands=200,parallel={band:4}')
    {'xc': 'PBE', 'nbands': 200, 'parallel': {'band': 4}}
    >>> str2dict('a=1.2,b=True,c=ab,d=1,2,3,e={f:42,g:cd}')
    {'a': 1.2, 'c': 'ab', 'b': True, 'e': {'g': 'cd', 'f': 42}, 'd': (1, 2, 3)}
    """
    if namespace is None:
        # BUGFIX: the old signature used a mutable default (namespace={}),
        # which eval() mutates (it inserts '__builtins__'), so the dict was
        # shared and carried state across unrelated calls.
        namespace = {}

    def myeval(value):
        # Evaluate the token if possible; otherwise keep the raw string
        # (e.g. 'PBE' raises NameError and stays a string).
        try:
            value = eval(value, namespace)
        except (NameError, SyntaxError):
            pass
        return value

    dct = {}
    s = (s + ',').split(sep)
    for i in range(len(s) - 1):
        key = s[i]
        # Everything before the last comma belongs to this value; the rest
        # is the next key.
        m = s[i + 1].rfind(',')
        value = s[i + 1][:m]
        if value[0] == '{':
            # Nested dictionary: recurse with ':' as key/value separator.
            assert value[-1] == '}'
            value = str2dict(value[1:-1], namespace, ':')
        elif value[0] == '(':
            assert value[-1] == ')'
            value = [myeval(t) for t in value[1:-1].split(',')]
        else:
            value = myeval(value)
        dct[key] = value
        s[i + 1] = s[i + 1][m + 1:]
    return dct
|
askhl/ase
|
ase/cli/run.py
|
Python
|
gpl-2.0
| 11,700
|
[
"ASE"
] |
a121e3735d9c39926c94d6decf6238b3135c9f211a7be82ffd606469f7019b56
|
# Copyright (c) 2021, TU Wien, Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of TU Wien, Department of Geodesy and Geoinformation
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL TU WIEN DEPARTMENT OF GEODESY AND
# GEOINFORMATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Test ASCAT Level 2 reader.
"""
import os
import sys
import pytest
import unittest
from datetime import datetime
import numpy as np
import numpy.testing as nptest
from ascat.read_native.bufr import AscatL2BufrFile
from ascat.read_native.nc import AscatL2NcFile
from ascat.eumetsat.level2 import AscatL2File
from ascat.eumetsat.level2 import AscatL2NcFileList
from ascat.eumetsat.level2 import AscatL2BufrFileList
from ascat.eumetsat.level2 import AscatL2EpsFileList
# Per-format fill ("NaN") sentinels used by the ASCAT Level 2 products to
# mark invalid/missing observations.  The comparison tests below use these
# to mask out cells before asserting that two formats agree.
eps_float_nan = -2147483648.  # EPS Native integer fill value
bufr_float_nan = 1.7e+38  # BUFR missing-value indicator for floats
uint8_nan = np.iinfo(np.uint8).max  # 255, fill for uint8 netCDF variables
uint16_nan = np.iinfo(np.uint16).max  # 65535, fill for uint16 netCDF variables
float32_nan = -999999.  # fill used by the generic reader for float32 fields
@pytest.mark.skipif(sys.platform == 'win32', reason="Does not work on Windows")
class Test_AscatL2BufrFile(unittest.TestCase):
    """
    Read a single ASCAT Level 2 BUFR file and compare a slice of the data
    against hard-coded reference values.
    """
    def setUp(self):
        """
        Setup test files.

        Builds the path to a Metop-B 12.5 km soil-moisture BUFR file that
        ships with the test data and opens it with ``AscatL2BufrFile``.
        """
        data_path = os.path.join(
            os.path.dirname(__file__), 'ascat_test_data', 'eumetsat',
            'ASCAT_L2_SM_125', 'bufr', 'Metop_B')
        fname = os.path.join(
            data_path, 'M01-ASCA-ASCSMR02-NA-5.0-20170220050900.000000000Z-20170220055833-1207110.bfr')
        self.reader = AscatL2BufrFile(fname)
    def test_read(self):
        """
        Test read.

        The `*_should` arrays are reference values taken from the first 25
        observations of this exact file; they pin the reader's decoding.
        """
        data, metadata = self.reader.read()
        ssm_should = np.array(
            [29.2, 30.2, 35.7, 38.6, 37.5, 37.6, 40.5, 44.5, 40.7,
             39.7, 41.5, 38.8, 34.5, 36.8, 39.4, 41.2, 42.4, 42.9,
             39.3, 30.5, 26.7, 26.5, 26.7, 23.9, 26.2])
        lats_should = np.array(
            [64.74398, 64.81854, 64.89284, 64.96688, 65.04066, 65.11416,
             65.18739, 65.26036, 65.33304, 65.40545, 65.47758, 65.54942,
             65.62099, 65.69226, 65.76324, 65.83393, 65.90432, 65.97442,
             66.04422, 66.11371, 66.1829, 66.25177, 66.32034, 66.38859,
             66.45653])
        ssm_mean_should = np.array(
            [36.7, 35.4, 33.4, 32.5, 32.5, 32., 31.2, 29.4, 28.7,
             27.6, 25.8, 25.4, 25.5, 25.3, 24.4, 23.4, 22.3, 21.3,
             20.4, 20.4, 19.9, 19.7, 20.3, 21.5, 22.9])
        nptest.assert_allclose(data['lat'][:25], lats_should, atol=1e-5)
        nptest.assert_allclose(data['Surface Soil Moisture (Ms)'][:25],
                               ssm_should, atol=1e-5)
        nptest.assert_allclose(data['Mean Surface Soil Moisture'][:25],
                               ssm_mean_should, atol=1e-5)
class Test_AscatL2NcFile(unittest.TestCase):
    """
    Read a single ASCAT Level 2 netCDF file and compare a slice of the data
    against hard-coded reference values.
    """
    def setUp(self):
        """
        Setup test files.

        Opens a Metop-A 12.5 km soil-moisture netCDF product from the
        bundled test data with ``AscatL2NcFile``.
        """
        data_path = os.path.join(
            os.path.dirname(__file__), 'ascat_test_data', 'eumetsat',
            'ASCAT_L2_SM_125', 'nc', 'Metop_A')
        fname = os.path.join(
            data_path,
            'W_XX-EUMETSAT-Darmstadt,SURFACE+SATELLITE,METOPA+ASCAT_C_EUMP_20170220041500_53652_eps_o_125_ssm_l2.nc')
        self.reader = AscatL2NcFile(fname)
    def test_read(self):
        """
        Test read.

        The `*_should` arrays hold the expected first 25 values of the
        latitude, soil-moisture and mean-soil-moisture variables.
        """
        data, metadata = self.reader.read()
        ssm_should = np.array([2.96000004, 0., 0., 0., 0., 0., 0., 0., 0.,
                               1.82999992, 3.32999992, 4.78999996, 4.31999969,
                               2.53999996, 0., 3.83999991, 5.76999998, 1.5,
                               2.44000006, 4.11999989, 2.25999999, 2.65999985,
                               5.5999999, 5.53999996, 4.85999966])
        lats_should = np.array([62.60224, 62.67133, 62.74015, 62.80871, 62.877,
                                62.94502, 63.01276, 63.08024, 63.14743,
                                63.21435, 63.28098, 63.34734, 63.41341,
                                63.47919, 63.54468, 63.60988, 63.67479,
                                63.7394, 63.80372, 63.86773, 63.93144,
                                63.99485, 64.05795, 64.12075, 64.18323])
        ssm_mean_should = np.array([21.26000023, 21.27999878, 21.38999939,
                                    22.43000031, 23.36999893, 24.51000023,
                                    26.01000023, 27.04999924, 26.94999886,
                                    26.63999939, 27.09999847, 27.56999969,
                                    27.43000031, 26.64999962, 26.53999901,
                                    27.48999977, 28.20999908, 28.38999939,
                                    28.79999924, 29.21999931, 30.01000023,
                                    30.97999954, 31.27999878, 31.8599987,
                                    32.05999756])
        nptest.assert_allclose(data['latitude'][:25], lats_should, atol=1e-5)
        nptest.assert_allclose(data['soil_moisture'][:25],
                               ssm_should, atol=1e-5)
        nptest.assert_allclose(data['mean_soil_moisture'][:25],
                               ssm_mean_should, atol=1e-5)
@pytest.mark.skipif(sys.platform == 'win32', reason="Does not work on Windows")
class Test_AscatL2NcFile_AscatL2BufrFile(unittest.TestCase):
    """
    Cross-format check: the same Metop-A orbit read from netCDF and BUFR
    must contain (almost) identical data, field by field.
    """
    def setUp(self):
        """
        Setup test files.

        Opens the same orbit once as netCDF and once as BUFR.
        """
        data_path = os.path.join(
            os.path.dirname(__file__), 'ascat_test_data', 'eumetsat',
            'ASCAT_L2_SM_125')
        fname_nc = os.path.join(
            data_path, 'nc', 'Metop_A',
            'W_XX-EUMETSAT-Darmstadt,SURFACE+SATELLITE,METOPA+ASCAT_C_EUMP_20170220041500_53652_eps_o_125_ssm_l2.nc')
        self.reader_nc = AscatL2NcFile(fname_nc)
        fname_bufr = os.path.join(
            data_path, 'bufr', 'Metop_A',
            'M02-ASCA-ASCSMR02-NA-5.0-20170220041500.000000000Z-20170220055656-1207110.bfr')
        self.reader_bufr = AscatL2BufrFile(fname_bufr)
    def test_read(self):
        """
        Test read.

        Compares every mappable field between the netCDF and BUFR readers.
        Fields mapped to ``None`` exist only in one of the two formats and
        are skipped.
        """
        data_nc, metadata = self.reader_nc.read()
        data_bufr, metadata = self.reader_bufr.read()
        nptest.assert_allclose(data_nc['latitude'], data_bufr['lat'],
                               atol=1e-4)
        # netCDF variable name -> corresponding BUFR field name
        nc_bufr_matching = {
            'slope40': 'Slope At 40 Deg Incidence Angle',
            'sigma40_error': 'Estimated Error In Sigma0 At 40 Deg Incidence Angle',
            'utc_line_nodes': None,
            'wet_backscatter': 'Wet Backscatter',
            'swath_indicator': None,
            'frozen_soil_probability': 'Frozen Land Surface Fraction',
            'wetland_flag': 'Inundation And Wetland Fraction',
            # The processing flag definition between BUFR and netCDF is slightly different
            # 'proc_flag1': 'Soil Moisture Processing Flag',
            'proc_flag2': None,
            'abs_line_number': None,
            'sat_track_azi': None,
            'sigma40': 'Backscatter',
            'soil_moisture': 'Surface Soil Moisture (Ms)',
            'soil_moisture_error': 'Estimated Error In Surface Soil Moisture',
            'rainfall_flag': 'Rain Fall Detection',
            'soil_moisture_sensitivity': 'Soil Moisture Sensitivity',
            'corr_flags': 'Soil Moisture Correction Flag',
            'dry_backscatter': 'Dry Backscatter',
            'aggregated_quality_flag': None,
            'mean_soil_moisture': 'Mean Surface Soil Moisture',
            'as_des_pass': None,
            'slope40_error': 'Estimated Error In Slope At 40 Deg Incidence Angle',
            'topography_flag': 'Topographic Complexity',
            'snow_cover_probability': 'Snow Cover'}
        # BUFR contains less accurate data so we only compare to 0.1
        for nc_name, bufr_name in nc_bufr_matching.items():
            if bufr_name is None:
                # field has no counterpart in the other format
                continue
            # Each format uses its own fill value, so only compare cells
            # that are valid in both datasets.  The fill value depends on
            # the netCDF variable's dtype.
            if nc_name in ['mean_soil_moisture']:
                # uint16 netCDF variable
                valid = ((data_nc[nc_name] != uint16_nan) &
                         (data_bufr[bufr_name] != bufr_float_nan))
            elif nc_name in ['snow_cover_probability', 'rainfall_flag',
                             'topography_flag', 'frozen_soil_probability',
                             'wetland_flag']:
                # uint8 netCDF variables
                # (fix: 'snow_cover_probability' was listed twice here)
                valid = ((data_nc[nc_name] != uint8_nan) &
                         (data_bufr[bufr_name] != bufr_float_nan))
            else:
                # all remaining fields use the EPS integer fill value
                valid = ((data_nc[nc_name] != eps_float_nan) &
                         (data_bufr[bufr_name] != bufr_float_nan))
            nptest.assert_allclose(data_nc[nc_name][valid],
                                   data_bufr[bufr_name][valid], atol=0.1)
@pytest.mark.skipif(sys.platform == 'win32', reason="Does not work on Windows")
class Test_AscatL2File(unittest.TestCase):
    """
    Test the generic ``AscatL2File`` reader against the same orbit stored
    in BUFR, EPS Native and netCDF format.
    """
    def setUp(self):
        """
        Setup test files.

        Opens one Metop-B 25 km soil-moisture orbit in all three formats
        via the generic reader.
        """
        data_path = os.path.join(
            os.path.dirname(__file__), 'ascat_test_data', 'eumetsat',
            'ASCAT_generic_reader_data')
        name_b = os.path.join(
            data_path, 'bufr',
            'M01-ASCA-ASCSMO02-NA-5.0-20180612035700.000000000Z-20180612044530-1281300.bfr')
        name_e = os.path.join(
            data_path, 'eps_nat',
            'ASCA_SMO_02_M01_20180612035700Z_20180612053856Z_N_O_20180612044530Z.nat')
        name_n = os.path.join(
            data_path, 'nc',
            'W_XX-EUMETSAT-Darmstadt,SURFACE+SATELLITE,METOPB+ASCAT_C_EUMP_20180612035700_29742_eps_o_250_ssm_l2.nc')
        self.bufr = AscatL2File(name_b)
        self.eps = AscatL2File(name_e)
        self.nc = AscatL2File(name_n)
    def test_read_all_formats(self):
        """
        Test read.

        All three formats must agree on coordinates and on every field in
        ``matching`` (to within BUFR's reduced precision).
        """
        bufr_ds, metadata = self.bufr.read()
        eps_ds, metadata = self.eps.read()
        nc_ds, metadata = self.nc.read()
        for coord in ['lon', 'lat']:
            nptest.assert_allclose(bufr_ds[coord], eps_ds[coord], atol=1e-4)
            nptest.assert_allclose(eps_ds[coord], nc_ds[coord], atol=1e-4)
            nptest.assert_allclose(nc_ds[coord], bufr_ds[coord], atol=1e-4)
        # Fields expected to be present in all three formats.
        # (fix: 'sat_id' was listed twice in this list)
        matching = ['sm', 'sm_noise', 'sm_mean', 'sig40', 'sig40_noise',
                    'slope40', 'slope40_noise', 'dry_sig40', 'wet_sig40',
                    'azi', 'sig', 'inc', 'sm_sens', 'snow_prob', 'frozen_prob',
                    'wetland', 'topo', 'sat_id', 'proc_flag', 'agg_flag',
                    'corr_flag', 'line_num', 'node_num',
                    'swath_indicator']
        # rounding issues in sat_track_azi leads to different as_des_pass
        # 'as_des_pass', 'sat_track_azi'
        # lists with no data fields
        nc_none = ['azi', 'inc', 'sig', 'corr_flag', 'proc_flag']
        # BUFR contain less accurate data so we only compare to 0.1
        for field in matching:
            # difference between the files should not be the case
            if field == 'sig40':
                # align fill values across formats before comparing
                mask = nc_ds[field] == float32_nan
                bufr_ds[field][mask] = float32_nan
                eps_ds[field][mask] = float32_nan
            nptest.assert_allclose(bufr_ds[field], eps_ds[field], atol=0.1)
            if field not in nc_none:
                nptest.assert_allclose(eps_ds[field], nc_ds[field], atol=0.1)
                nptest.assert_allclose(nc_ds[field], bufr_ds[field], atol=0.1)
    def test_eps(self):
        """
        Test read EPS.

        Pins the EPS Native reader's output against hard-coded reference
        values (first 25 observations, plus a timestamp slice).
        """
        eps_ds, metadata = self.eps.read()
        sm_should = np.array(
            [69.11, 74.23, 74.12, 75.95, 76.23, 80.74, 83.45, 84.94, 84.28,
             86.33, 86.19, 86.31, 87.64, 87.92, 90.65, 90.52, 89.71, 89.33,
             91.41, 91.89, 94.51, 70.43, 67.75, 60.54, 69.43])
        lat_should = np.array(
            [64.06651, 64.21156, 64.355545, 64.49845, 64.64026, 64.78095,
             64.9205, 65.05891, 65.19613, 65.33216, 65.46697, 65.600555,
             65.73289, 65.86394, 65.9937, 66.12214, 66.249245, 66.374985,
             66.499344, 66.62231, 66.743835, 69.63313, 69.698105, 69.760895,
             69.821495])
        lon_should = np.array(
            [121.95572, 121.564156, 121.16849, 120.76867, 120.36467,
             119.95644, 119.54396, 119.12719, 118.70608, 118.2806,
             117.85073, 117.41643, 116.97765, 116.53439, 116.08661,
             115.63427, 115.17735, 114.715836, 114.24969, 113.77889,
             113.30343, 96.66666, 96.049965, 95.42956, 94.80551])
        sm_mean_should = np.array(
            [77.97, 77.57, 79.2, 78.38, 77.85, 79.81, 80.72, 81.23, 82.43,
             82.11, 81.93, 82.55, 83.41, 81.84, 81.43, 81.28, 80.37, 79.6,
             79.43, 78.02, 77.49, 42.42, 41.69, 42.99, 47.51])
        t_should = np.array(
            ['2018-06-12T03:56:59.999', '2018-06-12T03:56:59.999',
             '2018-06-12T03:56:59.999', '2018-06-12T03:56:59.999',
             '2018-06-12T03:56:59.999', '2018-06-12T03:56:59.999',
             '2018-06-12T03:56:59.999', '2018-06-12T03:57:03.750',
             '2018-06-12T03:57:03.750', '2018-06-12T03:57:03.750'],
            dtype='datetime64[ms]')
        nptest.assert_allclose(eps_ds['lat'][:25], lat_should, atol=1e-5)
        nptest.assert_allclose(eps_ds['lon'][:25], lon_should, atol=1e-5)
        nptest.assert_allclose(eps_ds['sm'][:25], sm_should, atol=1e-5)
        nptest.assert_allclose(eps_ds['sm_mean'][:25],
                               sm_mean_should, atol=1e-5)
        nptest.assert_equal(eps_ds['time'][35:45], t_should)
@pytest.mark.skipif(sys.platform == 'win32', reason="Does not work on Windows")
class Test_AscatL2FileList(unittest.TestCase):
    """
    Test read AscatL2FileList in various formats.
    """
    def setUp(self):
        """
        Setup test data.

        Creates one file-list reader per storage format (BUFR, netCDF,
        EPS Native), all pointing at the same Metop-B SMO test orbit.
        """
        root_path = os.path.join(os.path.dirname(__file__), 'ascat_test_data',
                                 'eumetsat', 'ASCAT_generic_reader_data')
        sat = 'b'
        product = 'smo'
        self.bufr_smo = AscatL2BufrFileList(
            os.path.join(root_path, 'bufr'), sat, product)
        self.nc_smo = AscatL2NcFileList(
            os.path.join(root_path, 'nc'), sat, product)
        self.eps_smo = AscatL2EpsFileList(
            os.path.join(root_path, 'eps_nat'), sat, product)
    def test_smo_read_date(self):
        """
        Test read date for SMO formats.
        """
        dt = datetime(2018, 6, 12, 3, 57, 0)
        bufr_data, metadata = self.bufr_smo.read(dt)
        nc_data, metadata = self.nc_smo.read(dt)
        eps_data, metadata = self.eps_smo.read(dt)
        # every format pair must agree on the coordinates
        comparisons = [(bufr_data, eps_data),
                       (eps_data, nc_data),
                       (nc_data, bufr_data)]
        for coord in ['lon', 'lat']:
            for lhs, rhs in comparisons:
                nptest.assert_allclose(lhs[coord], rhs[coord], atol=1e-4)
    def test_smo_read_period(self):
        """
        Test read period for SMO formats.
        """
        dt_start = datetime(2018, 6, 12, 4, 0, 0)
        dt_end = datetime(2018, 6, 12, 4, 13, 0)
        bufr_data, metadata = self.bufr_smo.read_period(dt_start, dt_end)
        nc_data, metadata = self.nc_smo.read_period(dt_start, dt_end)
        eps_data, metadata = self.eps_smo.read_period(dt_start, dt_end)
        # every format pair must agree on the coordinates
        comparisons = [(bufr_data, eps_data),
                       (eps_data, nc_data),
                       (nc_data, bufr_data)]
        for coord in ['lon', 'lat']:
            for lhs, rhs in comparisons:
                nptest.assert_allclose(lhs[coord], rhs[coord], atol=1e-4)
# Allow running this test module directly (``python test_level2.py``)
# in addition to discovery via pytest.
if __name__ == '__main__':
    unittest.main()
|
TUW-GEO/ascat
|
tests/test_level2.py
|
Python
|
mit
| 17,088
|
[
"NetCDF"
] |
76186c07fd480685592932db68c13ddad5fa810b62521e8fb19be1ee0dcca4ec
|
import os
from setuptools import setup
def read(file):
    """Return the text content of *file* next to this setup script.

    Args:
        file: File name, resolved relative to the directory that contains
            this script.

    Returns:
        The file's contents as a string.
    """
    path = os.path.join(os.path.dirname(__file__), file)
    # Use a context manager so the handle is closed deterministically
    # (the original leaked the file object), and read as UTF-8 so the
    # result does not depend on the platform's default encoding.
    with open(path, encoding="utf-8") as fh:
        return fh.read()
# Project metadata for the ``vsut`` unit-testing framework, collected in a
# single mapping and handed to setuptools in one call.
SETUP_ARGS = dict(
    name="vsut",
    version="1.6",
    author="Alex Egger",
    author_email="alex.egger96@gmail.com",
    description="A simple unit testing framework for Python 3.4",
    license="MIT",
    keywords="unit unittest test testing",
    url="http://github.com/zillolo/vsut-python",
    packages=["vsut"],
    scripts=["runner.py"],
    # ``vrun`` console command maps to runner.main()
    entry_points={"console_scripts": ["vrun = runner:main"]},
    long_description="""For usage information visit:
    http://github.com/zillolo/vsut-python
    """,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Topic :: Software Development :: Testing"],
)
setup(**SETUP_ARGS)
|
zillolo/vsut-python
|
setup.py
|
Python
|
mit
| 918
|
[
"VisIt"
] |
ecf308b37971446f18623fbbf4cafd1dd135816ad58aa428b118df4874dbaa0f
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.optimizers.schedules.LearningRateSchedule")
class LearningRateSchedule(object):
  """A serializable learning rate decay schedule.

  `LearningRateSchedule`s can be passed in as the learning rate of optimizers in
  `tf.keras.optimizers`. They can be serialized and deserialized using
  `tf.keras.optimizers.schedules.serialize` and
  `tf.keras.optimizers.schedules.deserialize`.
  """
  # NOTE(review): the class derives from `object`, not `abc.ABC`, so the
  # @abc.abstractmethod decorators below do not prevent instantiation;
  # overriding is enforced at call time by the NotImplementedError bodies.
  @abc.abstractmethod
  def __call__(self, step):
    """Return the learning rate for the given optimizer `step`."""
    raise NotImplementedError("Learning rate schedule must override __call__")
  @abc.abstractmethod
  def get_config(self):
    """Return a serializable dict of the schedule's constructor arguments."""
    raise NotImplementedError("Learning rate schedule must override get_config")
  @classmethod
  def from_config(cls, config):
    """Instantiates a `LearningRateSchedule` from its config.

    Args:
      config: Output of `get_config()`.

    Returns:
      A `LearningRateSchedule` instance.
    """
    return cls(**config)
@keras_export("keras.optimizers.schedules.ExponentialDecay")
class ExponentialDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses an exponential decay schedule."""
  def __init__(
      self,
      initial_learning_rate,
      decay_steps,
      decay_rate,
      staircase=False,
      name=None):
    """Applies exponential decay to the learning rate.

    When training a model, it is often recommended to lower the learning rate as
    the training progresses. This schedule applies an exponential decay function
    to an optimizer step, given a provided initial learning rate.

    The schedule is a 1-arg callable that produces a decayed learning
    rate when passed the current optimizer step. It is computed as:

    ```python
    def decayed_learning_rate(step):
      return initial_learning_rate * decay_rate ^ (step / decay_steps)
    ```

    If the argument `staircase` is `True`, then `step / decay_steps` is
    an integer division and the decayed learning rate follows a
    staircase function.

    Example: When fitting a Keras model, decay every 100000 steps with a base
    of 0.96:

    ```python
    initial_learning_rate = 0.1
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate,
        decay_steps=100000,
        decay_rate=0.96,
        staircase=True)
    model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(data, labels, epochs=5)
    ```

    The learning rate schedule is also serializable and deserializable using
    `tf.keras.optimizers.schedules.serialize` and
    `tf.keras.optimizers.schedules.deserialize`.

    Args:
      initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
        Python number. The initial learning rate.
      decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Must be positive. See the decay computation above.
      decay_rate: A scalar `float32` or `float64` `Tensor` or a
        Python number. The decay rate.
      staircase: Boolean. If `True` decay the learning rate at discrete
        intervals.
      name: String. Optional name of the operation. Defaults to
        'ExponentialDecay'.

    Returns:
      A 1-arg callable learning rate schedule that takes the current optimizer
      step and outputs the decayed learning rate, a scalar `Tensor` of the same
      type as `initial_learning_rate`.
    """
    super(ExponentialDecay, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.decay_rate = decay_rate
    self.staircase = staircase
    self.name = name
  def __call__(self, step):
    with ops.name_scope_v2(self.name or "ExponentialDecay") as name:
      initial_learning_rate = ops.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      # Cast all operands to the dtype of the initial learning rate so the
      # arithmetic below does not mix tensor dtypes.
      decay_steps = math_ops.cast(self.decay_steps, dtype)
      decay_rate = math_ops.cast(self.decay_rate, dtype)
      global_step_recomp = math_ops.cast(step, dtype)
      p = global_step_recomp / decay_steps
      if self.staircase:
        # Integer division: the rate only changes every `decay_steps` steps.
        p = math_ops.floor(p)
      # initial_learning_rate * decay_rate ** p
      return math_ops.multiply(
          initial_learning_rate, math_ops.pow(decay_rate, p), name=name)
  def get_config(self):
    # Plain Python values only -- keeps the schedule serializable.
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "decay_steps": self.decay_steps,
        "decay_rate": self.decay_rate,
        "staircase": self.staircase,
        "name": self.name
    }
@keras_export("keras.optimizers.schedules.PiecewiseConstantDecay")
class PiecewiseConstantDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses a piecewise constant decay schedule."""
  def __init__(
      self,
      boundaries,
      values,
      name=None):
    """Piecewise constant from boundaries and interval values.

    The function returns a 1-arg callable to compute the piecewise constant
    when passed the current optimizer step.

    Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
    for the next 10000 steps, and 0.1 for any additional steps.

    ```python
    step = tf.Variable(0, trainable=False)
    boundaries = [100000, 110000]
    values = [1.0, 0.5, 0.1]
    learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(
        boundaries, values)
    # Later, whenever we perform an optimization step, we pass in the step.
    learning_rate = learning_rate_fn(step)
    ```

    The learning rate schedule is also serializable and deserializable using
    `tf.keras.optimizers.schedules.serialize` and
    `tf.keras.optimizers.schedules.deserialize`.

    Args:
      boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
        increasing entries, and with all elements having the same type as the
        optimizer step.
      values: A list of `Tensor`s or `float`s or `int`s that specifies the
        values for the intervals defined by `boundaries`. It should have one
        more element than `boundaries`, and all elements should have the same
        type.
      name: A string. Optional name of the operation. Defaults to
        'PiecewiseConstant'.

    Returns:
      A 1-arg callable learning rate schedule that takes the current optimizer
      step and outputs the decayed learning rate, a scalar `Tensor` of the same
      type as the boundary tensors.
      The output of the 1-arg function that takes the `step`
      is `values[0]` when `step <= boundaries[0]`,
      `values[1]` when `step > boundaries[0]` and `step <= boundaries[1]`, ...,
      and values[-1] when `step > boundaries[-1]`.

    Raises:
      ValueError: if the number of elements in the lists do not match.
    """
    super(PiecewiseConstantDecay, self).__init__()
    # values defines one learning rate per interval, so it must have
    # exactly one element more than boundaries.
    if len(boundaries) != len(values) - 1:
      raise ValueError(
          "The length of boundaries should be 1 less than the length of values")
    self.boundaries = boundaries
    self.values = values
    self.name = name
  def __call__(self, step):
    with ops.name_scope_v2(self.name or "PiecewiseConstant"):
      boundaries = ops.convert_n_to_tensor(self.boundaries)
      values = ops.convert_n_to_tensor(self.values)
      x_recomp = ops.convert_to_tensor(step)
      for i, b in enumerate(boundaries):
        if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
          # We cast the boundaries to have the same type as the step
          b = math_ops.cast(b, x_recomp.dtype.base_dtype)
          boundaries[i] = b
      # Build one (predicate, value-callable) pair per interval; the two
      # open-ended intervals below/above the boundary range come first.
      pred_fn_pairs = []
      pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
      pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
      for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
        # Need to bind v here; can do this with lambda v=v: ...
        # (a plain `lambda: v` would late-bind and always yield the last v)
        pred = (x_recomp > low) & (x_recomp <= high)
        pred_fn_pairs.append((pred, lambda v=v: v))
      # The default isn't needed here because our conditions are mutually
      # exclusive and exhaustive, but tf.case requires it.
      default = lambda: values[0]
      return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
  def get_config(self):
    return {
        "boundaries": self.boundaries,
        "values": self.values,
        "name": self.name
    }
@keras_export("keras.optimizers.schedules.PolynomialDecay")
class PolynomialDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses a polynomial decay schedule."""
  def __init__(
      self,
      initial_learning_rate,
      decay_steps,
      end_learning_rate=0.0001,
      power=1.0,
      cycle=False,
      name=None):
    """Applies a polynomial decay to the learning rate.

    This schedule applies a polynomial decay function to an optimizer step,
    given a provided `initial_learning_rate`, to reach an `end_learning_rate`
    in the given `decay_steps`. It is computed as:

    ```python
    def decayed_learning_rate(step):
      step = min(step, decay_steps)
      return ((initial_learning_rate - end_learning_rate) *
              (1 - step / decay_steps) ^ (power)
             ) + end_learning_rate
    ```

    If `cycle` is True then a multiple of `decay_steps` is used, the first one
    that is bigger than `step`.

    ```python
    def decayed_learning_rate(step):
      decay_steps = decay_steps * ceil(step / decay_steps)
      return ((initial_learning_rate - end_learning_rate) *
              (1 - step / decay_steps) ^ (power)
             ) + end_learning_rate
    ```

    Example: Fit a model while decaying from 0.1 to 0.01 in 10000 steps using
    sqrt (i.e. power=0.5):

    ```python
    ...
    starter_learning_rate = 0.1
    end_learning_rate = 0.01
    decay_steps = 10000
    learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
        starter_learning_rate,
        decay_steps,
        end_learning_rate,
        power=0.5)
    model.compile(optimizer=tf.keras.optimizers.SGD(
                      learning_rate=learning_rate_fn),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(data, labels, epochs=5)
    ```

    The learning rate schedule is also serializable and deserializable using
    `tf.keras.optimizers.schedules.serialize` and
    `tf.keras.optimizers.schedules.deserialize`.

    Args:
      initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
        Python number. The initial learning rate.
      decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Must be positive. See the decay computation above.
      end_learning_rate: A scalar `float32` or `float64` `Tensor` or a
        Python number. The minimal end learning rate.
      power: A scalar `float32` or `float64` `Tensor` or a
        Python number. The power of the polynomial. Defaults to linear, 1.0.
      cycle: A boolean, whether or not it should cycle beyond decay_steps.
      name: String. Optional name of the operation. Defaults to
        'PolynomialDecay'.

    Returns:
      A 1-arg callable learning rate schedule that takes the current optimizer
      step and outputs the decayed learning rate, a scalar `Tensor` of the same
      type as `initial_learning_rate`.
    """
    super(PolynomialDecay, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.end_learning_rate = end_learning_rate
    self.power = power
    self.cycle = cycle
    self.name = name
  def __call__(self, step):
    with ops.name_scope_v2(self.name or "PolynomialDecay") as name:
      initial_learning_rate = ops.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      # Cast operands to a common dtype before any arithmetic.
      end_learning_rate = math_ops.cast(self.end_learning_rate, dtype)
      power = math_ops.cast(self.power, dtype)
      global_step_recomp = math_ops.cast(step, dtype)
      decay_steps_recomp = math_ops.cast(self.decay_steps, dtype)
      if self.cycle:
        # Find the first multiple of decay_steps that is bigger than
        # global_step. If global_step is zero set the multiplier to 1
        multiplier = control_flow_ops.cond(
            math_ops.equal(global_step_recomp, 0), lambda: 1.0,
            lambda: math_ops.ceil(global_step_recomp / self.decay_steps))
        decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)
      else:
        # Make sure that the global_step used is not bigger than decay_steps.
        global_step_recomp = math_ops.minimum(global_step_recomp,
                                              decay_steps_recomp)
      # Fraction of the (possibly cycled) decay period completed so far.
      p = math_ops.divide(global_step_recomp, decay_steps_recomp)
      # (init - end) * (1 - p) ** power + end
      return math_ops.add(
          math_ops.multiply(initial_learning_rate - end_learning_rate,
                            math_ops.pow(1 - p, power)),
          end_learning_rate,
          name=name)
  def get_config(self):
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "decay_steps": self.decay_steps,
        "end_learning_rate": self.end_learning_rate,
        "power": self.power,
        "cycle": self.cycle,
        "name": self.name
    }
@keras_export("keras.optimizers.schedules.InverseTimeDecay")
class InverseTimeDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses an inverse time decay schedule."""
  def __init__(
      self,
      initial_learning_rate,
      decay_steps,
      decay_rate,
      staircase=False,
      name=None):
    """Applies inverse time decay to the initial learning rate.

    This schedule applies the inverse decay function to an optimizer step,
    given a provided initial learning rate. It is computed as:

    ```python
    def decayed_learning_rate(step):
      return initial_learning_rate / (1 + decay_rate * step / decay_step)
    ```

    or, if `staircase` is `True`, as:

    ```python
    def decayed_learning_rate(step):
      return initial_learning_rate / (1 + decay_rate * floor(step / decay_step))
    ```

    Example: Fit a Keras model when decaying 1/t with a rate of 0.5:

    ```python
    ...
    initial_learning_rate = 0.1
    decay_steps = 1.0
    decay_rate = 0.5
    learning_rate_fn = keras.optimizers.schedules.InverseTimeDecay(
        initial_learning_rate, decay_steps, decay_rate)
    model.compile(optimizer=tf.keras.optimizers.SGD(
                      learning_rate=learning_rate_fn),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(data, labels, epochs=5)
    ```

    Args:
      initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
        Python number. The initial learning rate.
      decay_steps: How often to apply decay.
      decay_rate: A Python number. The decay rate.
      staircase: Whether to apply decay in a discrete staircase, as opposed to
        continuous, fashion.
      name: String. Optional name of the operation. Defaults to
        'InverseTimeDecay'.

    Returns:
      A 1-arg callable learning rate schedule that takes the current optimizer
      step and outputs the decayed learning rate, a scalar `Tensor` of the same
      type as `initial_learning_rate`.
    """
    super(InverseTimeDecay, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.decay_rate = decay_rate
    self.staircase = staircase
    self.name = name
  def __call__(self, step):
    with ops.name_scope_v2(self.name or "InverseTimeDecay") as name:
      initial_learning_rate = ops.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      # Cast operands to the dtype of the initial learning rate.
      decay_steps = math_ops.cast(self.decay_steps, dtype)
      decay_rate = math_ops.cast(self.decay_rate, dtype)
      global_step_recomp = math_ops.cast(step, dtype)
      p = global_step_recomp / decay_steps
      if self.staircase:
        # Discrete steps: the rate only changes every `decay_steps` steps.
        p = math_ops.floor(p)
      # denom = 1 + decay_rate * p; lr = initial / denom
      const = math_ops.cast(constant_op.constant(1), dtype)
      denom = math_ops.add(const, math_ops.multiply(decay_rate, p))
      return math_ops.divide(initial_learning_rate, denom, name=name)
  def get_config(self):
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "decay_steps": self.decay_steps,
        "decay_rate": self.decay_rate,
        "staircase": self.staircase,
        "name": self.name
    }
@keras_export("keras.experimental.CosineDecay")
class CosineDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses a cosine decay schedule."""
  def __init__(
      self,
      initial_learning_rate,
      decay_steps,
      alpha=0.0,
      name=None):
    """Applies cosine decay to the learning rate.

    See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent
    with Warm Restarts. https://arxiv.org/abs/1608.03983

    This schedule applies a cosine decay function to an optimizer step,
    given a provided initial learning rate. It is computed as:

    ```python
    def decayed_learning_rate(step):
      step = min(step, decay_steps)
      cosine_decay = 0.5 * (1 + cos(pi * step / decay_steps))
      decayed = (1 - alpha) * cosine_decay + alpha
      return initial_learning_rate * decayed
    ```

    Example usage:
    ```python
    decay_steps = 1000
    lr_decayed_fn = tf.keras.experimental.CosineDecay(
        initial_learning_rate, decay_steps)
    ```

    The learning rate schedule is also serializable and deserializable using
    `tf.keras.optimizers.schedules.serialize` and
    `tf.keras.optimizers.schedules.deserialize`.

    Args:
      initial_learning_rate: A scalar `float32` or `float64` Tensor or a
        Python number. The initial learning rate.
      decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Number of steps to decay over.
      alpha: A scalar `float32` or `float64` Tensor or a Python number.
        Minimum learning rate value as a fraction of initial_learning_rate.
      name: String. Optional name of the operation. Defaults to 'CosineDecay'.

    Returns:
      A 1-arg callable learning rate schedule that takes the current optimizer
      step and outputs the decayed learning rate, a scalar `Tensor` of the same
      type as `initial_learning_rate`.
    """
    super(CosineDecay, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.alpha = alpha
    self.name = name
  def __call__(self, step):
    with ops.name_scope_v2(self.name or "CosineDecay"):
      initial_learning_rate = ops.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      decay_steps = math_ops.cast(self.decay_steps, dtype)
      global_step_recomp = math_ops.cast(step, dtype)
      # Clamp the step so the schedule stays at its final value once
      # `decay_steps` steps have passed.
      global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
      completed_fraction = global_step_recomp / decay_steps
      # 0.5 * (1 + cos(pi * t)) decays smoothly from 1 at t=0 to 0 at t=1.
      cosine_decayed = 0.5 * (1.0 + math_ops.cos(
          constant_op.constant(math.pi) * completed_fraction))
      # `alpha` sets a floor as a fraction of the initial learning rate.
      decayed = (1 - self.alpha) * cosine_decayed + self.alpha
      return math_ops.multiply(initial_learning_rate, decayed)
  def get_config(self):
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "decay_steps": self.decay_steps,
        "alpha": self.alpha,
        "name": self.name
    }
@keras_export("keras.experimental.CosineDecayRestarts")
class CosineDecayRestarts(LearningRateSchedule):
  """A LearningRateSchedule that uses a cosine decay schedule with restarts."""
  def __init__(
      self,
      initial_learning_rate,
      first_decay_steps,
      t_mul=2.0,
      m_mul=1.0,
      alpha=0.0,
      name=None):
    """Applies cosine decay with restarts to the learning rate.
    See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent
    with Warm Restarts. https://arxiv.org/abs/1608.03983
    When training a model, it is often recommended to lower the learning rate as
    the training progresses. This schedule applies a cosine decay function with
    restarts to an optimizer step, given a provided initial learning rate.
    It requires a `step` value to compute the decayed learning rate. You can
    just pass a TensorFlow variable that you increment at each training step.
    The schedule is a 1-arg callable that produces a decayed learning
    rate when passed the current optimizer step. This can be useful for changing
    the learning rate value across different invocations of optimizer functions.
    The learning rate multiplier first decays
    from 1 to `alpha` for `first_decay_steps` steps. Then, a warm
    restart is performed. Each new warm restart runs for `t_mul` times more
    steps and with `m_mul` times smaller initial learning rate.
    Example usage:
    ```python
    first_decay_steps = 1000
    lr_decayed_fn = (
      tf.keras.experimental.CosineDecayRestarts(
          initial_learning_rate,
          first_decay_steps))
    ```
    You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
    as the learning rate. The learning rate schedule is also serializable and
    deserializable using `tf.keras.optimizers.schedules.serialize` and
    `tf.keras.optimizers.schedules.deserialize`.
    Args:
      initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
        number. The initial learning rate.
      first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
        number. Number of steps to decay over.
      t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
        Used to derive the number of iterations in the i-th period
      m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
        Used to derive the initial learning rate of the i-th period:
      alpha: A scalar `float32` or `float64` Tensor or a Python number.
        Minimum learning rate value as a fraction of the initial_learning_rate.
      name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
    Returns:
      A 1-arg callable learning rate schedule that takes the current optimizer
      step and outputs the decayed learning rate, a scalar `Tensor` of the same
      type as `initial_learning_rate`.
    """
    super(CosineDecayRestarts, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.first_decay_steps = first_decay_steps
    self._t_mul = t_mul
    self._m_mul = m_mul
    self.alpha = alpha
    self.name = name
  def __call__(self, step):
    with ops.name_scope_v2(self.name or "SGDRDecay") as name:
      initial_learning_rate = ops.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      first_decay_steps = math_ops.cast(self.first_decay_steps, dtype)
      alpha = math_ops.cast(self.alpha, dtype)
      t_mul = math_ops.cast(self._t_mul, dtype)
      m_mul = math_ops.cast(self._m_mul, dtype)
      global_step_recomp = math_ops.cast(step, dtype)
      # Progress measured in units of the *first* decay period.
      completed_fraction = global_step_recomp / first_decay_steps
      def compute_step(completed_fraction, geometric=False):
        """Helper for `cond` operation.

        Maps the overall completed fraction to (restart index, fraction of
        progress within the current restart period).
        """
        if geometric:
          # Period i lasts t_mul**i first-period units; invert the geometric
          # series sum to find which period the step falls into.
          i_restart = math_ops.floor(
              math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /
              math_ops.log(t_mul))
          sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
          completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart
        else:
          # t_mul == 1: equal-length periods, so the integer part counts
          # restarts and the fractional part is progress within the period.
          i_restart = math_ops.floor(completed_fraction)
          completed_fraction -= i_restart
        return i_restart, completed_fraction
      i_restart, completed_fraction = control_flow_ops.cond(
          math_ops.equal(t_mul, 1.0),
          lambda: compute_step(completed_fraction, geometric=False),
          lambda: compute_step(completed_fraction, geometric=True))
      # Each restart shrinks the cosine amplitude by a factor of m_mul.
      m_fac = m_mul**i_restart
      cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos(
          constant_op.constant(math.pi) * completed_fraction))
      decayed = (1 - alpha) * cosine_decayed + alpha
      return math_ops.multiply(initial_learning_rate, decayed, name=name)
  def get_config(self):
    """Returns the constructor arguments of this schedule as a dict."""
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "first_decay_steps": self.first_decay_steps,
        "t_mul": self._t_mul,
        "m_mul": self._m_mul,
        "alpha": self.alpha,
        "name": self.name
    }
@keras_export("keras.experimental.LinearCosineDecay")
class LinearCosineDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses a linear cosine decay schedule."""

  def __init__(
      self,
      initial_learning_rate,
      decay_steps,
      num_periods=0.5,
      alpha=0.0,
      beta=0.001,
      name=None):
    """Applies linear cosine decay to the learning rate.

    See [Bello et al., ICML2017] Neural Optimizer Search with RL.
    https://arxiv.org/abs/1709.07417
    For the warm-start idea controlled by `num_periods`, see
    [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
    with Warm Restarts. https://arxiv.org/abs/1608.03983

    Linear cosine decay is more aggressive than plain cosine decay, so larger
    initial learning rates can typically be used. Given the current optimizer
    `step`, the decayed rate is computed as:

    ```python
    def decayed_learning_rate(step):
      step = min(step, decay_steps)
      linear_decay = (decay_steps - step) / decay_steps
      cosine_decay = 0.5 * (
          1 + cos(pi * 2 * num_periods * step / decay_steps))
      decayed = (alpha + linear_decay) * cosine_decay + beta
      return initial_learning_rate * decayed
    ```

    Example usage:
    ```python
    decay_steps = 1000
    lr_decayed_fn = (
      tf.keras.experimental.LinearCosineDecay(
        initial_learning_rate, decay_steps))
    ```

    The schedule is a 1-arg callable mapping the current optimizer step to a
    decayed learning rate; you can pass it directly into a
    `tf.keras.optimizers.Optimizer` as the learning rate. It is serializable
    and deserializable via `tf.keras.optimizers.schedules.serialize` and
    `tf.keras.optimizers.schedules.deserialize`.

    Args:
      initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
        number. The initial learning rate.
      decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Number of steps to decay over.
      num_periods: Number of periods in the cosine part of the decay.
        See computation above.
      alpha: See computation above.
      beta: See computation above.
      name: String. Optional name of the operation. Defaults to
        'LinearCosineDecay'.

    Returns:
      A 1-arg callable learning rate schedule that takes the current optimizer
      step and outputs the decayed learning rate, a scalar `Tensor` of the same
      type as `initial_learning_rate`.
    """
    super(LinearCosineDecay, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.num_periods = num_periods
    self.alpha = alpha
    self.beta = beta
    self.name = name

  def __call__(self, step):
    with ops.name_scope_v2(self.name or "LinearCosineDecay") as name:
      lr = ops.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = lr.dtype
      # Cast every hyperparameter to the learning rate's dtype.
      total_steps = math_ops.cast(self.decay_steps, dtype)
      periods = math_ops.cast(self.num_periods, dtype)
      alpha = math_ops.cast(self.alpha, dtype)
      beta = math_ops.cast(self.beta, dtype)
      # Clamp the step so the schedule is constant once decay has finished.
      current = math_ops.minimum(math_ops.cast(step, dtype), total_steps)
      linear = (total_steps - current) / total_steps
      fraction = 2.0 * periods * (current / total_steps)
      cosine = 0.5 * (
          1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
      combined = (alpha + linear) * cosine + beta
      return math_ops.multiply(lr, combined, name=name)

  def get_config(self):
    """Returns the constructor arguments of this schedule as a dict."""
    return dict(
        initial_learning_rate=self.initial_learning_rate,
        decay_steps=self.decay_steps,
        num_periods=self.num_periods,
        alpha=self.alpha,
        beta=self.beta,
        name=self.name)
@keras_export("keras.experimental.NoisyLinearCosineDecay")
class NoisyLinearCosineDecay(LearningRateSchedule):
  """A LearningRateSchedule that uses a noisy linear cosine decay schedule."""
  def __init__(
      self,
      initial_learning_rate,
      decay_steps,
      initial_variance=1.0,
      variance_decay=0.55,
      num_periods=0.5,
      alpha=0.0,
      beta=0.001,
      name=None):
    """Applies noisy linear cosine decay to the learning rate.
    See [Bello et al., ICML2017] Neural Optimizer Search with RL.
    https://arxiv.org/abs/1709.07417
    For the idea of warm starts here controlled by `num_periods`,
    see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
    with Warm Restarts. https://arxiv.org/abs/1608.03983
    Note that linear cosine decay is more aggressive than cosine decay and
    larger initial learning rates can typically be used.
    When training a model, it is often recommended to lower the learning rate as
    the training progresses. This schedule applies a noisy linear cosine decay
    function to an optimizer step, given a provided initial learning rate.
    It requires a `step` value to compute the decayed learning rate. You can
    just pass a TensorFlow variable that you increment at each training step.
    The schedule is a 1-arg callable that produces a decayed learning
    rate when passed the current optimizer step. This can be useful for changing
    the learning rate value across different invocations of optimizer functions.
    It is computed as:
    ```python
    def decayed_learning_rate(step):
      step = min(step, decay_steps)
      linear_decay = (decay_steps - step) / decay_steps
      cosine_decay = 0.5 * (
          1 + cos(pi * 2 * num_periods * step / decay_steps))
      decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
      return initial_learning_rate * decayed
    ```
    where eps_t is 0-centered gaussian noise with variance
    initial_variance / (1 + global_step) ** variance_decay
    Example usage:
    ```python
    decay_steps = 1000
    lr_decayed_fn = (
      tf.keras.experimental.NoisyLinearCosineDecay(
        initial_learning_rate, decay_steps))
    ```
    You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
    as the learning rate. The learning rate schedule is also serializable and
    deserializable using `tf.keras.optimizers.schedules.serialize` and
    `tf.keras.optimizers.schedules.deserialize`.
    Args:
      initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
        number. The initial learning rate.
      decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Number of steps to decay over.
      initial_variance: initial variance for the noise. See computation above.
      variance_decay: decay for the noise's variance. See computation above.
      num_periods: Number of periods in the cosine part of the decay.
        See computation above.
      alpha: See computation above.
      beta: See computation above.
      name: String. Optional name of the operation. Defaults to
        'NoisyLinearCosineDecay'.
    Returns:
      A 1-arg callable learning rate schedule that takes the current optimizer
      step and outputs the decayed learning rate, a scalar `Tensor` of the same
      type as `initial_learning_rate`.
    """
    super(NoisyLinearCosineDecay, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.initial_variance = initial_variance
    self.variance_decay = variance_decay
    self.num_periods = num_periods
    self.alpha = alpha
    self.beta = beta
    self.name = name
  def __call__(self, step):
    with ops.name_scope_v2(self.name or "NoisyLinearCosineDecay") as name:
      initial_learning_rate = ops.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      decay_steps = math_ops.cast(self.decay_steps, dtype)
      initial_variance = math_ops.cast(self.initial_variance, dtype)
      variance_decay = math_ops.cast(self.variance_decay, dtype)
      num_periods = math_ops.cast(self.num_periods, dtype)
      alpha = math_ops.cast(self.alpha, dtype)
      beta = math_ops.cast(self.beta, dtype)
      global_step_recomp = math_ops.cast(step, dtype)
      global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
      linear_decayed = (decay_steps - global_step_recomp) / decay_steps
      # Noise variance shrinks over time: initial_variance / (1+step)**decay.
      variance = initial_variance / (
          math_ops.pow(1.0 + global_step_recomp, variance_decay))
      std = math_ops.sqrt(variance)
      # Perturb the linear term with 0-centered Gaussian noise; note this makes
      # the schedule non-deterministic across calls at the same step.
      noisy_linear_decayed = (
          linear_decayed + random_ops.random_normal(
              linear_decayed.shape, stddev=std))
      completed_fraction = global_step_recomp / decay_steps
      fraction = 2.0 * num_periods * completed_fraction
      cosine_decayed = 0.5 * (
          1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
      noisy_linear_cosine_decayed = (
          (alpha + noisy_linear_decayed) * cosine_decayed + beta)
      return math_ops.multiply(
          initial_learning_rate, noisy_linear_cosine_decayed, name=name)
  def get_config(self):
    """Returns the constructor arguments of this schedule as a dict."""
    return {
        "initial_learning_rate": self.initial_learning_rate,
        "decay_steps": self.decay_steps,
        "initial_variance": self.initial_variance,
        "variance_decay": self.variance_decay,
        "num_periods": self.num_periods,
        "alpha": self.alpha,
        "beta": self.beta,
        "name": self.name
    }
@keras_export("keras.optimizers.schedules.serialize")
def serialize(learning_rate_schedule):
  """Serializes a `LearningRateSchedule` into a Keras config structure."""
  return generic_utils.serialize_keras_object(learning_rate_schedule)
@keras_export("keras.optimizers.schedules.deserialize")
def deserialize(config, custom_objects=None):
  """Instantiates a `LearningRateSchedule` from its serialized config.

  Args:
    config: The config structure produced by `serialize`.
    custom_objects: Optional dict mapping names to custom classes/functions
      referenced by the config.
  """
  return generic_utils.deserialize_keras_object(
      config,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name="decay")
|
jhseu/tensorflow
|
tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py
|
Python
|
apache-2.0
| 38,681
|
[
"Gaussian"
] |
5d62f9ecc9e74aaac4112c85f37657d8cd9f422f2afe44476ba255ce4ca08103
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: Tobias Houska and Stijn Van Hoey
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import _algorithm
import numpy as np
class sceua(_algorithm):
    """
    This class holds the Shuffled Complex Evolution Algorithm (SCE-UA),
    based on:
    Duan, Q., Sorooshian, S. and Gupta, V. K. (1994)
    Optimal use of the SCE-UA global optimization method for calibrating watershed models, J. Hydrol.
    Based on the Python package Optimization_SCE
    Copyright (c) 2011 Stijn Van Hoey.
    Restructured and parallelized by Houska et al (2015):
    Houska, T., Kraft, P., Chamorro-Chavez, A. and Breuer, L. (2015)
    SPOTting Model Parameters Using a Ready-Made Python Package, PLoS ONE.
    """
    def __init__(self, *args, **kwargs):
        """
        Input
        ----------
        spot_setup: class
            model: function
                Should be callable with a parameter combination of the parameter-function
                and return an list of simulation results (as long as evaluation list)
            parameter: function
                When called, it should return a random parameter combination. Which can
                be e.g. uniform or Gaussian
            objectivefunction: function
                Should return the objectivefunction for a given list of a model simulation and
                observation.
            evaluation: function
                Should return the true values as return by the model.
        dbname: str
            * Name of the database where parameter, objectivefunction value and simulation results will be saved.
        dbformat: str
            * ram: fast suited for short sampling time. no file will be created and results are saved in an array.
            * csv: A csv file will be created, which you can import afterwards.
        parallel: str
            * seq: Sequential sampling (default): Normal iterations on one core of your cpu.
            * mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).
        save_sim: boolean
            * True: Simulation results will be saved
            * False: Simulation results will not be saved
        """
        kwargs['optimization_direction'] = 'minimize'
        kwargs['algorithm_name'] = 'Shuffled Complex Evolution (SCE-UA) algorithm'
        super(sceua, self).__init__(*args, **kwargs)
    def simulate(self, id_params_tuple):
        """This overwrites the simple wrapper function of _algorithms.py
        and makes a two phase mpi parallelization possible:
        1) burn-in
        2) complex evolution
        """
        if not self.repeat.phase: # burn-in
            return _algorithm.simulate(self, id_params_tuple)
        else: # complex-evolution
            igs, x, xf, cx, cf, sce_vars = id_params_tuple
            self.npg, self.nopt, self.ngs, self.nspl, self.nps, self.bl, self.bu, self.stochastic_parameters, discarded_runs = sce_vars
            # Partition the population into complexes (sub-populations);
            k1 = np.arange(self.npg, dtype=int)
            k2 = k1 * self.ngs + igs
            cx[k1, :] = x[k2, :]
            cf[k1] = xf[k2]
            # Evolve sub-population igs for self.nspl steps:
            likes = []
            sims = []
            pars = []
            for loop in range(self.nspl):
                # Select simplex by sampling the complex according to a linear
                # probability distribution
                lcs = np.array([0] * self.nps)
                lcs[0] = 1
                for k3 in range(1, self.nps):
                    for i in range(1000):
                        lpos = int(np.floor(
                            self.npg + 0.5 - np.sqrt((self.npg + 0.5)**2 - self.npg * (self.npg + 1) * np.random.random())))
                        # check if the element has already been chosen
                        idx = (lcs[0:k3] == lpos).nonzero()
                        if idx[0].size == 0:
                            break
                    lcs[k3] = lpos
                lcs.sort()
                # Construct the simplex:
                s = cx[lcs, :]
                sf = cf[lcs]
                snew, fnew, simulation, discarded_runs = self._cceua(s, sf, discarded_runs)
                likes.append(fnew)
                pars.append(snew)
                sims.append(simulation)
                # Replace the worst point in Simplex with the new point:
                s[-1, :] = snew
                sf[-1] = fnew
                # Replace the simplex into the complex;
                cx[lcs, :] = s
                cf[lcs] = sf
                # Sort the complex;
                idx = np.argsort(cf)
                cf = np.sort(cf)
                cx = cx[idx, :]
                # Replace the complex back into the population;
            return igs, likes, pars, sims, cx, cf, k1, k2, discarded_runs
    def sample(self, repetitions, ngs=20, kstop=100, pcento=0.0000001, peps=0.0000001):
        """
        Samples from parameter distributions using SCE-UA (Duan, 2004),
        converted to python by Van Hoey (2011), restructured and parallelized by Houska et al (2015).
        Parameters
        ----------
        repetitions: int
            maximum number of function evaluations allowed during optimization
        ngs: int
            number of complexes (sub-populations), take more than the number of
            analysed parameters
        kstop: int
            the number of past evolution loops and their respective objective value to assess whether the marginal improvement at the current loop (in percentage) is less than pcento
        pcento: float
            the percentage change allowed in the past kstop loops below which convergence is assumed to be achieved.
        peps: float
            Value of the normalized geometric range of the parameters in the population below which convergence is deemed achieved.
        """
        self.set_repetiton(repetitions)
        # Initialize SCE parameters:
        self.ngs = ngs
        randompar = self.parameter()['random']
        self.nopt = randompar.size
        self.npg = 2 * self.nopt + 1    # points per complex
        self.nps = self.nopt + 1        # points per simplex
        self.nspl = self.npg            # evolution steps per complex
        npt = self.npg * self.ngs       # total population size
        self.iseed = 1
        self.discarded_runs = 0
        self.bl, self.bu = self.parameter()['minbound'], self.parameter()[
            'maxbound']
        bound = self.bu - self.bl  # np.array
        # Parameters with zero-width bounds are constant and excluded from the
        # geometric-range convergence measure.
        self.stochastic_parameters = bound != 0
        proceed = True
        if self.breakpoint == 'read' or self.breakpoint == 'readandwrite':
            data_frombreak = self.read_breakdata(self.dbname)
            icall = data_frombreak[0]
            x = data_frombreak[1][0]
            xf = data_frombreak[1][1]
            gnrng = data_frombreak[2]
        elif self.breakpoint is None or self.breakpoint == 'write':
            # Create an initial population to fill array x(npt,self.nopt):
            x = self._sampleinputmatrix(npt, self.nopt)
            nloop = 0
            icall = 0
            xf = np.zeros(npt)
            print ('Starting burn-in sampling...')
            # Burn in
            param_generator = ((rep, x[rep]) for rep in range(int(npt)))
            for rep, randompar, simulations in self.repeat(param_generator):
                # Calculate the objective function
                like = self.postprocessing(icall, randompar, simulations,chains=0)
                xf[rep] = like
                icall+=1
                if self.status.stop:
                    print('Stopping samplig. Maximum number of repetitions reached already during burn-in')
                    proceed = False
                    break
            # Sort the population in order of increasing function values;
            idx = np.argsort(xf)
            xf = np.sort(xf)
            x = x[idx, :]
        else:
            raise ValueError("Don't know the breakpoint keyword {}".format(self.breakpoint))
        # Record the best points;
        bestx = x[0, :]
        bestf = xf[0]
        BESTF = bestf
        BESTX = bestx
        # Computes the normalized geometric range of the parameters
        gnrng = np.exp(
            np.mean(np.log((np.max(x[:, self.stochastic_parameters], axis=0) - np.min(x[:, self.stochastic_parameters], axis=0)) / bound[self.stochastic_parameters])))
        # Check for convergency;
        if self.status.rep >= repetitions:
            print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT')
            print('ON THE MAXIMUM NUMBER OF TRIALS ')
            print(repetitions)
            print('HAS BEEN EXCEEDED. SEARCH WAS STOPPED AT TRIAL NUMBER:')
            print(self.status.rep)
            print('OF THE INITIAL LOOP!')
        if gnrng < peps:
            print(
                'THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE')
        print ('Burn-in sampling completed...')
        # Begin evolution loops:
        nloop = 0
        criter = []
        criter_change_pcent = 1e+5
        self.repeat.setphase('ComplexEvo')
        print ('Starting Complex Evolution...')
        proceed = True
        # NOTE(review): `icall` only advances during burn-in; this loop
        # terminates via the gnrng/pcento convergence criteria or via `proceed`
        # being cleared once self.status.rep approaches `repetitions`.
        while icall < repetitions and gnrng > peps and criter_change_pcent > pcento and proceed == True:
            nloop += 1
            print ('ComplexEvo loop #%d in progress...' % nloop)
            # Loop on complexes (sub-populations);
            cx = np.zeros((self.npg, self.nopt))
            cf = np.zeros((self.npg))
            remaining_runs = repetitions - self.status.rep
            if remaining_runs <= self.ngs:
                self.ngs = remaining_runs-1
                proceed = False
            sce_vars = [self.npg, self.nopt, self.ngs, self.nspl,
                        self.nps, self.bl, self.bu, self.stochastic_parameters, self.discarded_runs]
            param_generator = ((rep, x, xf, cx, cf, sce_vars)
                               for rep in range(int(self.ngs)))
            for igs, likes, pars, sims, cx, cf, k1, k2, discarded_runs in self.repeat(param_generator):
                x[k2, :] = cx[k1, :]
                xf[k2] = cf[k1]
                # Bug fix: this previously assigned `self.discard_runs` (typo),
                # so the real `self.discarded_runs` counter never received the
                # worker's count.
                self.discarded_runs = discarded_runs
                for i in range(len(likes)):
                    if not self.status.stop:
                        like = self.postprocessing(i, pars[i], sims[i], chains=i+1)
                    else:
                        #Collect data from all slaves but do not save
                        proceed=False
                        like = self.postprocessing(i, pars[i], sims[i], chains=i+1, save_run=False)
                        self.discarded_runs+=1
                        print('Skipping saving')
            # Bug fix: `and` binds tighter than `or`, so the original condition
            # backed up on every loop in 'write' mode regardless of
            # `backup_every_rep`. Parenthesize to apply the cadence to both modes.
            if (self.breakpoint == 'write' or self.breakpoint == 'readandwrite')\
                    and self.status.rep >= self.backup_every_rep:
                work = (self.status.rep, (x, xf), gnrng)
                self.write_breakdata(self.dbname, work)
            # End of Loop on Complex Evolution;
            # Shuffled the complexes;
            idx = np.argsort(xf)
            xf = np.sort(xf)
            x = x[idx, :]
            # Record the best and worst points;
            bestx = x[0, :]
            bestf = xf[0]
            # append here and reshape at the end
            BESTX = np.append(BESTX, bestx, axis=0)
            BESTF = np.append(BESTF, bestf)
            # Computes the normalized geometric range of the parameters
            gnrng = np.exp(
                np.mean(np.log((np.max(x[:, self.stochastic_parameters], axis=0) - np.min(x[:, self.stochastic_parameters], axis=0)) / bound[self.stochastic_parameters])))
            criter = np.append(criter, bestf)
            # Check for convergency;
            if self.status.rep >= repetitions:
                print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT')
                print('ON THE MAXIMUM NUMBER OF TRIALS ')
                print(repetitions)
                print('HAS BEEN EXCEEDED.')
            elif gnrng < peps:
                print(
                    'THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE')
            elif nloop >= kstop: # necessary so that the area of high posterior density is visited as much as possible
                print ('Objective function convergence criteria is now being updated and assessed...')
                absolute_change = np.abs(
                    criter[nloop - 1] - criter[nloop - kstop])*100
                denominator = np.mean(np.abs(criter[(nloop - kstop):nloop]))
                if denominator == 0.0:
                    criter_change_pcent = 0.0
                else:
                    criter_change_pcent = absolute_change / denominator
                print ('Updated convergence criteria: %f' % criter_change_pcent)
                if criter_change_pcent <= pcento:
                    print('THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY LESS THAN THE USER-SPECIFIED THRESHOLD %f' % (
                        kstop, pcento))
                    print(
                        'CONVERGENCY HAS ACHIEVED BASED ON OBJECTIVE FUNCTION CRITERIA!!!')
            elif self.status.stop:
                proceed = False
                break
        # End of the Outer Loops
        print('SEARCH WAS STOPPED AT TRIAL NUMBER: %d' % self.status.rep)
        print('NUMBER OF DISCARDED TRIALS: %d' % self.discarded_runs)
        print('NORMALIZED GEOMETRIC RANGE = %f' % gnrng)
        print('THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY %f PERCENT' % (
            kstop, criter_change_pcent))
        # reshape BESTX
        #BESTX = BESTX.reshape(BESTX.size // self.nopt, self.nopt)
        self.final_call()
    def _cceua(self, s, sf, discarded_runs):
        # This is the subroutine for generating a new point in a simplex
        #
        # s(.,.) = the sorted simplex in order of increasing function values
        # s(.) = function values in increasing order
        #
        # LIST OF LOCAL VARIABLES
        # sb(.) = the best point of the simplex
        # sw(.) = the worst point of the simplex
        # w2(.) = the second worst point of the simplex
        # fw = function value of the worst point
        # ce(.) = the centroid of the simplex excluding wo
        # snew(.) = new point generated from the simplex
        # iviol = flag indicating if constraints are violated
        # = 1 , yes
        # = 0 , no
        constant_parameters = np.invert(self.stochastic_parameters)
        self.nps, self.nopt = s.shape
        alpha = 1.0  # reflection coefficient
        beta = 0.5   # contraction coefficient
        # Assign the best and worst points:
        sw = s[-1, :]
        fw = sf[-1]
        # Compute the centroid of the simplex excluding the worst point:
        ce = np.mean(s[:-1, :], axis=0)
        # Attempt a reflection point
        snew = ce + alpha * (ce - sw)
        snew[constant_parameters] = sw[constant_parameters]
        # Check if is outside the bounds:
        ibound = 0
        s1 = snew - self.bl
        idx = (s1 < 0).nonzero()
        if idx[0].size != 0:
            ibound = 1
        s1 = self.bu - snew
        idx = (s1 < 0).nonzero()
        if idx[0].size != 0:
            ibound = 2
        if ibound >= 1:
            # Out of bounds: fall back to a fresh random point.
            snew = self._sampleinputmatrix(1, self.nopt)[0]
        ## fnew = functn(self.nopt,snew);
        _, _, simulations = _algorithm.simulate(self, (1, snew))
        like = self.postprocessing(1, snew, simulations, save_run=False, block_print=True)
        discarded_runs+=1
        fnew = like
        # Reflection failed; now attempt a contraction point:
        if fnew > fw:
            snew = sw + beta * (ce - sw)
            snew[constant_parameters] = sw[constant_parameters]
            _, _, simulations = _algorithm.simulate(self, (2, snew))
            like = self.postprocessing(2, snew, simulations, save_run=False, block_print=True)
            discarded_runs+=1
            fnew = like
        # Both reflection and contraction have failed, attempt a random point;
            if fnew > fw:
                snew = self._sampleinputmatrix(1, self.nopt)[0]
                _, _, simulations = _algorithm.simulate(self, (3, snew))
                like = self.postprocessing(3, snew, simulations, save_run=False, block_print=True)
                discarded_runs+=1
                fnew = like
        # END OF CCE
        return snew, fnew, simulations, discarded_runs
    def _sampleinputmatrix(self, nrows, npars):
        '''
        Create inputparameter matrix for nrows simulations,
        for npars with bounds ub and lb (np.array from same size)
        distname gives the initial sampling distribution (currently one for all parameters)
        returns np.array
        '''
        x = np.zeros((nrows, npars))
        for i in range(nrows):
            x[i, :] = self.parameter()['random']
        return x
|
bees4ever/spotpy
|
spotpy/algorithms/sceua.py
|
Python
|
mit
| 17,408
|
[
"Gaussian"
] |
9ab614b497dc88f604296b250a3d1a332bf5e9c2dc6e5f727478a055a190ebf5
|
import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset
import numpy as np
from numpy.testing import assert_array_equal
# Temporary netCDF file shared by the test cases below.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VL_NAME = 'vlen_type'
VL_BASETYPE = np.int16
DIM1_NAME = 'lon'
DIM2_NAME = 'lat'
nlons = 5
nlats = 5
VAR1_NAME = 'ragged'
VAR2_NAME = 'strings'
VAR3_NAME = 'strings_alt'
VAR4_NAME = 'string_scalar'
VAR5_NAME = 'vlen_scalar'
# Ragged sample data: flat cell k holds k+1 integers (0..k) and a string of
# k+2 consecutive lowercase letters starting at 'a'.
data = np.empty(nlats * nlons, object)
datas = np.empty(nlats * nlons, object)
nn = 0
for n in range(nlats * nlons):
    nn += 1
    data[n] = np.arange(nn, dtype=VL_BASETYPE)
    datas[n] = ''.join(chr(c) for c in range(97, 97 + nn + 1))
data = np.reshape(data, (nlats, nlons))
datas = np.reshape(datas, (nlats, nlons))
class VariablesTestCase(unittest.TestCase):
    """Round-trips vlen and variable-length string variables through a file."""
    def setUp(self):
        """Create a file holding ragged, string and scalar vlen variables."""
        self.file = FILE_NAME
        f = Dataset(self.file,'w')
        vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
        f.createDimension(DIM1_NAME,nlons)
        f.createDimension(DIM2_NAME,nlats)
        ragged = f.createVariable(VAR1_NAME, vlen_t,\
                (DIM2_NAME,DIM1_NAME))
        strings = f.createVariable(VAR2_NAME, str,
                (DIM2_NAME,DIM1_NAME))
        strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
                                       (DIM2_NAME, DIM1_NAME))
        string_scalar = f.createVariable(VAR4_NAME,str,())
        vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,())
        ragged[:] = data
        # Re-assign a single element after the full write to exercise
        # element-wise overwrite of a vlen cell.
        ragged[-1,-1] = data[-1,-1]
        strings[:] = datas
        strings[-2,-2] = datas[-2,-2]
        strings_alt[:] = datas.astype(str)
        string_scalar[...] = 'foo' #issue458
        vlen_scalar[...] = np.array([1,2,3],np.int16)
        f.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing vlen variables"""
        f = Dataset(self.file, 'r')
        v = f.variables[VAR1_NAME]
        vs = f.variables[VAR2_NAME]
        vs_alt = f.variables[VAR3_NAME]
        # The file must expose exactly the one vlen type created in setUp.
        assert list(f.vltypes.keys()) == [VL_NAME]
        assert f.vltypes[VL_NAME].dtype == VL_BASETYPE
        assert f.variables['string_scalar'][...] == 'foo'
        assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16))
        data2 = v[:]
        data2s = vs[:]
        # Every ragged cell and every string must round-trip unchanged.
        for i in range(nlons):
            for j in range(nlats):
                assert_array_equal(data2[j,i], data[j,i])
                assert datas[j,i] == data2s[j,i]
        assert_array_equal(datas, vs_alt[:])
        f.close()
class TestInvalidDataType(unittest.TestCase):
    """NETCDF3 files must reject vlen string variables with ValueError."""
    def runTest(self):
        f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC')
        f.createDimension('x', 1)
        # using assertRaisesRegext as a context manager
        # only works with python >= 2.7 (issue #497)
        #with self.assertRaisesRegexp(ValueError, 'strings are only supported'):
        #    f.createVariable('foo', str, ('x',))
        try:
            f.createVariable('foo', str, ('x',))
        except ValueError:
            pass
        f.close()
        os.remove(FILE_NAME)
class TestScalarVlenString(unittest.TestCase):
    # issue 333
    def runTest(self):
        """A scalar string variable must round-trip through the file."""
        f = Dataset(FILE_NAME, 'w', format='NETCDF4')
        teststring = f.createVariable('teststring', str)
        stringout = "yyyymmdd_hhmmss"
        # Scalar assignment uses the empty-tuple index.
        teststring[()] = stringout
        f.close()
        f = Dataset(FILE_NAME)
        assert f.variables['teststring'][:] == stringout
        f.close()
        os.remove(FILE_NAME)
class TestIntegerIndex(unittest.TestCase):
    # issue 526
    def runTest(self):
        """Non-native integer-like indices (np.int32, float) must work."""
        strtest = Dataset(FILE_NAME, 'w', format='NETCDF4')
        strtest.createDimension('tenstrings', 10)
        strtest.createVariable('tenstrings', str, ['tenstrings'])
        # Indexing with np.int32 and with a whole-number float both exercise
        # the index-coercion path from issue 526.
        strtest['tenstrings'][np.int32(5)] = 'asdf'
        strtest['tenstrings'][6.0] = 'asdf'
        strtest.close()
        f = Dataset(FILE_NAME)
        assert f.variables['tenstrings'][np.int32(5)] == 'asdf'
        assert f.variables['tenstrings'][6.0] == 'asdf'
        f.close()
        os.remove(FILE_NAME)
class TestObjectArrayIndexing(unittest.TestCase):
    """Fancy indexing on the object array returned by a string variable."""
    def setUp(self):
        """Write only the alternate string variable used by runTest."""
        self.file = FILE_NAME
        f = Dataset(self.file,'w')
        vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
        f.createDimension(DIM1_NAME,nlons)
        f.createDimension(DIM2_NAME,nlats)
        strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
                                       (DIM2_NAME, DIM1_NAME))
        strings_alt[:] = datas.astype(str)
        f.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing vlen variables"""
        f = Dataset(self.file, 'r')
        vs_alt = f.variables[VAR3_NAME]
        unicode_strings = vs_alt[:]
        # Fancy (list) indexing of a row picks columns 1, 2 and 4, whose
        # module-level sample strings are 'abc', 'abcd' and 'abcdef'.
        fancy_indexed = unicode_strings[0][[1,2,4]]
        assert fancy_indexed[0] == 'abc'
        assert fancy_indexed[1] == 'abcd'
        assert fancy_indexed[2] == 'abcdef'
        f.close()
class VlenAppendTestCase(unittest.TestCase):
    """Regression test for appending to vlen variables (issue #527)."""
    def setUp(self):
        import netCDF4
        if netCDF4.__netcdf4libversion__ < "4.4.1":
            # vlen append needs netcdf-c >= 4.4.1; remember the decision so
            # runTest can bail out even where skipTest is unavailable
            self.skip = True
            try:
                self.skipTest("This test requires NetCDF 4.4.1 or later.")
            except AttributeError:
                # workaround for Python 2.6 (skipTest(reason) is new
                # in Python 2.7)
                pass
        else:
            self.skip = False
        # the file is created unconditionally so tearDown can remove it
        self.file = FILE_NAME
        f = Dataset(self.file, 'w')
        vlen_type = f.createVLType(np.float64, 'vltest')
        f.createDimension('x', None)
        v = f.createVariable('vl', vlen_type, 'x')
        w = f.createVariable('vl2', np.float64, 'x')
        f.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing appending to vlen variables (issue #527)."""
        # workaround for Python 2.6
        if self.skip:
            return
        f = Dataset(self.file, 'a')
        w = f.variables["vl2"]
        v = f.variables["vl"]
        # growing the unlimited dim via 'vl2' leaves 'vl' with unwritten
        # entries; reading them must not crash the library
        w[0:3] = np.arange(3, dtype=np.float64)
        v[0] # sometimes crashes
        v[0].tolist() # sometimes crashes
        v[0].size # BOOM!
        f.close()
class Vlen_ScaledInts(unittest.TestCase):
    def setUp(self):
        self.file = FILE_NAME
        nc = Dataset(self.file, 'w')
        vlen_type = nc.createVLType(np.uint8, 'vltest')
        nc.createDimension('x', None)
        packed = nc.createVariable('vl', vlen_type, 'x')
        packed.scale_factor = 1./254.
        packed.missing_value = np.array(255, np.uint8)
        # random lengths between 1 and 1000
        lengths = np.random.randint(1, 1000, size=100)
        for index, length in enumerate(lengths):
            sample = np.random.uniform(low=0.0, high=1.0, size=length)
            packed[index] = sample
            if index == 99:
                # keep the last sample so runTest can compare against it
                self.data = sample
        nc.close()

    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)

    def runTest(self):
        """testing packing float vlens as scaled integers (issue #1003)."""
        nc = Dataset(self.file)
        unpacked = nc['vl'][-1]
        # compression error must stay below one quantization step
        assert(np.abs(unpacked - self.data).max() < nc['vl'].scale_factor)
        # with auto-scaling off we should see the raw packed integers
        nc.set_auto_maskandscale(False)
        raw = nc['vl'][-1]
        assert(raw[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))
        nc.close()
# Run the whole suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
Unidata/netcdf4-python
|
test/tst_vlen.py
|
Python
|
mit
| 7,675
|
[
"NetCDF"
] |
9a2063abf4466f21b7ae7d6e31d7fd6900656cef7f8ab00bad2568bacfe87dfc
|
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2010-2013 Francois Beaune, Jupiter Jazz Limited
# Copyright (c) 2014-2017 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import argparse
import datetime
import os
import subprocess
#--------------------------------------------------------------------------------------------------
# Constants.
#--------------------------------------------------------------------------------------------------
# Name of the projecttool binary expected to sit next to this script.
DEFAULT_TOOL_FILENAME = "projecttool.exe" if os.name == "nt" else "projecttool"
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
def walk(directory, recursive):
    """Yield the path of every file inside `directory`.

    directory : root directory to scan
    recursive : when True, descend into all subdirectories; when False,
                only yield files directly inside `directory`

    Directory entries themselves are never yielded.
    """
    if recursive:
        for dirpath, dirnames, filenames in os.walk(directory):
            for filename in filenames:
                yield os.path.join(dirpath, filename)
    else:
        # Bug fix: generators lost the `.next()` method in Python 3;
        # use the builtin next() (works on Python 2.6+ as well).
        dirpath, dirnames, filenames = next(os.walk(directory))
        for filename in filenames:
            yield os.path.join(dirpath, filename)
#--------------------------------------------------------------------------------------------------
# Update a given project file.
#--------------------------------------------------------------------------------------------------
def update_project_file(filepath, tool_path):
    """Run the project tool to update one .appleseed file in place.

    filepath  : path of the project file to update
    tool_path : path of the projecttool binary to invoke
    """
    print("updating {0}...".format(filepath))
    # the tool's exit status is deliberately ignored; failures are
    # reported on its own stderr
    subprocess.call([tool_path, "update", filepath])
#--------------------------------------------------------------------------------------------------
# Update all project files in a given directory (possibly recursively).
# Returns the number of updated project files.
#--------------------------------------------------------------------------------------------------
def update_project_files(tool_path, directory, recursive):
    """Update every .appleseed file found under `directory`.

    Returns the number of project files that were updated.
    """
    count = 0
    for path in walk(directory, recursive):
        extension = os.path.splitext(path)[1]
        if extension == ".appleseed":
            update_project_file(path, tool_path)
            count += 1
    return count
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
def main():
    """Parse command-line arguments and update all matching project files."""
    parser = argparse.ArgumentParser(description="normalize multiple project files and update "
                                                 "them to the latest format revision if necessary.")
    parser.add_argument("-t", "--tool-path", metavar="tool-path",
                        help="set the path to the projecttool binary")
    parser.add_argument("-r", "--recursive", action='store_true', dest="recursive",
                        help="scan the specified directory and all its subdirectories")
    parser.add_argument("directory", help="directory to scan")
    args = parser.parse_args()

    # If no tool path is provided, search for the tool in the same directory as this script.
    if args.tool_path is None:
        script_directory = os.path.dirname(os.path.realpath(__file__))
        args.tool_path = os.path.join(script_directory, DEFAULT_TOOL_FILENAME)
        print("setting tool path to {0}.".format(args.tool_path))

    start_time = datetime.datetime.now()
    updated_file_count = update_project_files(args.tool_path, args.directory, args.recursive)
    elapsed = datetime.datetime.now() - start_time
    print("updated {0} project file(s) in {1}.".format(updated_file_count, elapsed))
# Script entry point.
if __name__ == '__main__':
    main()
|
gospodnetic/appleseed
|
scripts/updatemany.py
|
Python
|
mit
| 4,821
|
[
"VisIt"
] |
8c277616f4b0c953ba75d95e6767b2537ae8fd53475457c828d67323663b1f18
|
#!/usr/bin/env python
__author__ = 'Mike McCann'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
Master loader for all March 2013 CANON-ECOHAB activities.
Mike McCann
MBARI 13 March 2013
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir) # So that CANON is found
from CANON import CANONLoader
import timing
# Campaign definition: database alias, campaign title, X3D terrain used by
# the spatial 3D UI, and the bathymetry grid for section plots.
cl = CANONLoader('stoqs_march2013', 'CANON-ECOHAB - March 2013',
                 description = 'Spring 2013 ECOHAB in San Pedro Bay',
                 x3dTerrains = { 'https://stoqs.mbari.org/x3d/SanPedroBasin50/SanPedroBasin50_10x-pop.x3d': {
                     'position': '-2523652.5 -4726093.2 3499413.2',
                     'orientation': '0.96902 -0.20915 -0.13134 1.74597',
                     'centerOfRotation': '-2505293.6 -4686937.5 3513055.2',
                     'VerticalExaggeration': '10',
                 }
                 },
                 grdTerrain = os.path.join(parentDir, 'SanPedroBasin50.grd')
                 )
# Aboard the Carson use zuma
##cl.tdsBase = 'http://zuma.rc.mbari.org/thredds/'
cl.tdsBase = 'http://odss.mbari.org/thredds/' # Use this on shore
cl.dodsBase = cl.tdsBase + 'dodsC/'
# 2-second decimated dorado data
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2013/netcdf/'
cl.dorado_files = [
'Dorado389_2013_074_02_074_02_decim.nc',
'Dorado389_2013_075_05_075_06_decim.nc',
'Dorado389_2013_076_01_076_02_decim.nc',
'Dorado389_2013_079_04_079_04_decim.nc',
'Dorado389_2013_080_02_080_02_decim.nc',
'Dorado389_2013_081_05_081_05_decim.nc',
'Dorado389_2013_081_06_081_06_decim.nc',
]
cl.dorado_parms = [ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700',
'fl700_uncorr', 'salinity', 'biolume',
'sepCountList', 'mepCountList',
'roll', 'pitch', 'yaw',
]
# Realtime telemetered (_r_) daphne data - insert '_r_' to not load the files
##cl.daphne_base = 'http://aosn.mbari.org/lrauvtds/dodsC/lrauv/daphne/2012/'
daphne_r_base = cl.dodsBase + 'CANON_march2013/lrauv/daphne/realtime/sbdlogs/2013/201303/'
daphne_r_files = [
'shore_201303132226_201303140449.nc',
'shore_201303140708_201303140729.nc',
'shore_201303140729_201303141609.nc',
'shore_201303141631_201303151448.nc',
'shore_201303141631_201303181540.nc',
]
cl.daphne_r_parms = [ 'sea_water_temperature', 'mass_concentration_of_chlorophyll_in_sea_water']
# Postrecovery full-resolution (_d_) daphne data - insert '_d_' for delayed-mode to not load the data
daphne_d_base = 'http://dods.mbari.org/opendap/data/lrauv/daphne/missionlogs/2013/'
daphne_d_files = [
'20130313_20130318/20130313T195025/201303131950_201303132226.nc',
'20130313_20130318/20130313T222616/201303132226_201303140321.nc',
'20130313_20130318/20130313T222616/201303132226_201303140705.nc',
'20130313_20130318/20130314T070622/201303140706_201303140729.nc',
'20130313_20130318/20130314T072813/201303140728_201303140846.nc',
'20130313_20130318/20130314T072813/201303140728_201303141601.nc',
'20130313_20130318/20130314T072813/201303141601_201303141629.nc',
'20130313_20130318/20130314T162843/201303141628_201303141901.nc',
'20130313_20130318/20130314T162843/201303141628_201303141924.nc',
'20130313_20130318/20130314T162843/201303141901_201303150303.nc',
'20130313_20130318/20130314T162843/201303150303_201303151019.nc',
'20130313_20130318/20130314T162843/201303151019_201303151821.nc',
'20130313_20130318/20130314T162843/201303151821_201303151901.nc',
'20130313_20130318/20130314T162843/201303151901_201303160253.nc',
'20130313_20130318/20130314T162843/201303160253_201303161024.nc',
'20130313_20130318/20130314T162843/201303161024_201303161826.nc',
'20130313_20130318/20130314T162843/201303161826_201303161900.nc',
'20130313_20130318/20130314T162843/201303161900_201303162301.nc',
'20130313_20130318/20130314T162843/201303162301_201303170637.nc',
'20130313_20130318/20130314T162843/201303170637_201303171444.nc',
'20130313_20130318/20130314T162843/201303171444_201303171701.nc',
'20130313_20130318/20130314T162843/201303171701_201303180033.nc',
'20130313_20130318/20130314T162843/201303180033_201303180835.nc',
'20130313_20130318/20130314T162843/201303180835_201303180904.nc',
'20130313_20130318/20130314T162843/201303180904_201303181637.nc',
'20130313_20130318/20130314T162843/201303181637_201303181649.nc',
'20130313_20130318/20130318T165540/201303181655_201303182034.nc',
'20130313_20130318/20130318T165540/201303181655_201303182153.nc',
'20130319_20130325/20130319T213509/201303192135_201303200025.nc',
'20130319_20130325/20130320T002513/201303200025_201303200103.nc',
'20130319_20130325/20130320T002513/201303200025_201303200117.nc',
'20130319_20130325/20130320T002513/201303200103_201303201612.nc',
'20130319_20130325/20130320T002513/201303201612_201303201612.nc',
'20130319_20130325/20130320T002513/201303201613_201303201903.nc',
'20130319_20130325/20130320T002513/201303201903_201303210202.nc',
'20130319_20130325/20130320T002513/201303210202_201303211003.nc',
'20130319_20130325/20130320T002513/201303211003_201303211011.nc',
'20130319_20130325/20130321T100747/201303211008_201303211210.nc',
'20130319_20130325/20130321T100747/201303211008_201303211557.nc',
'20130319_20130325/20130321T155349/201303211554_201303211718.nc',
'20130319_20130325/20130321T155349/201303211554_201303211804.nc',
'20130319_20130325/20130321T155349/201303211804_201303220301.nc',
'20130319_20130325/20130321T155349/201303220301_201303221106.nc',
'20130319_20130325/20130321T155349/201303221106_201303221201.nc',
'20130319_20130325/20130321T155349/201303221201_201303222301.nc',
'20130319_20130325/20130321T155349/201303222301_201303222313.nc',
'20130319_20130325/20130322T231504/201303222315_201303222324.nc',
'20130319_20130325/20130322T232523/201303222325_201303230002.nc',
'20130319_20130325/20130322T232523/201303222325_201303230018.nc',
'20130319_20130325/20130322T232523/201303230002_201303230824.nc',
'20130319_20130325/20130322T232523/201303230824_201303231619.nc',
'20130319_20130325/20130322T232523/201303231619_201303231702.nc',
'20130319_20130325/20130322T232523/201303231702_201303240113.nc',
'20130319_20130325/20130322T232523/201303240113_201303240206.nc',
'20130319_20130325/20130322T232523/201303240206_201303240916.nc',
'20130319_20130325/20130322T232523/201303240916_201303241000.nc',
'20130319_20130325/20130322T232523/201303241000_201303241723.nc',
'20130319_20130325/20130322T232523/201303241725_201303242002.nc',
'20130319_20130325/20130322T232523/201303242002_201303250425.nc',
'20130319_20130325/20130322T232523/201303250425_201303250518.nc',
'20130319_20130325/20130322T232523/201303250518_201303250848.nc',
'20130319_20130325/20130325T084507/201303250845_201303251544.nc',
]
daphne_d_parms = [ 'sea_water_temperature', 'sea_water_salinity', 'sea_water_density', 'volume_scattering_470_nm',
'volume_scattering_650_nm', 'mass_concentration_of_oxygen_in_sea_water', 'mole_concentration_of_nitrate_in_sea_water',
'mass_concentration_of_chlorophyll_in_sea_water']
# Binned Daphne data
daphne_b_base = 'http://odss.mbari.org/thredds/dodsC/CANON_march2013/lrauv/daphne/'
daphne_b_files = ['Daphne_ECOHAB_March2013.nc']
daphne_b_parms = ['temperature', 'salinity', 'chlorophyll', 'bb470', 'bb650']
cl.daphne_base = daphne_b_base
cl.daphne_files = daphne_b_files
cl.daphne_parms = daphne_b_parms
# Realtime telemetered (_r_) tethys data - insert '_r_' to not load the files
tethys_r_base = cl.dodsBase + 'CANON_march2013/lrauv/tethys/realtime/sbdlogs/2013/201303/'
tethys_r_files = [
'shore_201303140812_201303141247.nc',
'shore_201303141252_201303141329.nc',
'shore_201303141331_201303150644.nc',
'shore_201303150645_201303151308.nc',
'shore_201303151312_201303151339.nc',
'shore_201303151333_201303151334.nc',
'shore_201303151337_201303151503.nc',
'shore_201303151504_201303151706.nc',
'shore_201303151714_201303151730.nc',
'shore_201303151728_201303151747.nc',
'shore_201303151748_201303151947.nc',
'shore_201303151950_201303152001.nc',
'shore_201303152003_201303152011.nc',
'shore_201303152013_201303152026.nc',
'shore_201303152027_201303160953.nc',
'shore_201303160958_201303161025.nc',
'shore_201303161027_201303161039.nc',
'shore_201303161041_201303170254.nc',
'shore_201303170334_201303170607.nc',
'shore_201303170616_201303170638.nc',
'shore_201303170641_201303170646.nc',
'shore_201303170647_201303171828.nc',
'shore_201303171835_201303171849.nc',
'shore_201303171851_201303171856.nc',
'shore_201303171857_201303172034.nc',
'shore_201303172042_201303172051.nc',
'shore_201303172055_201303172058.nc',
'shore_201303172059_201303180702.nc',
'shore_201303180717_201303180736.nc',
'shore_201303180733_201303180742.nc',
'shore_201303180743_201303181632.nc', # Incomplete list of shore files
# Put effort into loading full-resolution data
]
tethys_r_parms = [ 'sea_water_temperature', 'mass_concentration_of_chlorophyll_in_sea_water', 'mole_concentration_of_nitrate_in_sea_water',
'platform_x_velocity_current', 'platform_y_velocity_current', 'platform_z_velocity_current']
# Postrecovery full-resolution tethys data - insert '_d_' for delayed-mode to not load the data
tethys_d_base = 'http://dods.mbari.org/opendap/data/lrauv/tethys/missionlogs/2013/'
tethys_d_files = [
'20130313_20130320/20130313T203723/201303132037_201303132240.nc',
'20130313_20130320/20130313T224020/201303132240_201303140239.nc',
'20130313_20130320/20130314T023827/201303140238_201303140547.nc',
'20130313_20130320/20130314T023827/201303140238_201303140715.nc',
'20130313_20130320/20130314T071458/201303140715_201303140731.nc',
'20130313_20130320/20130314T073047/201303140731_201303140803.nc',
'20130313_20130320/20130314T080454/201303140805_201303140811.nc',
'20130313_20130320/20130314T081138/201303140811_201303141248.nc',
'20130313_20130320/20130314T125102/201303141251_201303141329.nc',
'20130313_20130320/20130314T133105/201303141331_201303141424.nc',
'20130313_20130320/20130314T133105/201303141331_201303141602.nc',
'20130313_20130320/20130314T133105/201303141602_201303142309.nc',
'20130313_20130320/20130314T133105/201303142309_201303150644.nc',
'20130313_20130320/20130315T064246/201303150643_201303150802.nc',
'20130313_20130320/20130315T064246/201303150643_201303150909.nc',
'20130313_20130320/20130315T064246/201303150802_201303151102.nc',
'20130313_20130320/20130315T064246/201303151102_201303151308.nc',
'20130313_20130320/20130315T131039/201303151310_201303151331.nc',
'20130313_20130320/20130315T133305/201303151333_201303151335.nc',
'20130313_20130320/20130315T133635/201303151336_201303151503.nc',
'20130313_20130320/20130315T150400/201303151504_201303151601.nc',
'20130313_20130320/20130315T150400/201303151504_201303151706.nc',
'20130313_20130320/20130315T150400/201303151601_201303151706.nc',
'20130313_20130320/20130315T170914/201303151709_201303151725.nc',
'20130313_20130320/20130315T172729/201303151727_201303151747.nc',
'20130313_20130320/20130315T174744/201303151747_201303151947.nc',
'20130313_20130320/20130315T195016/201303151950_201303152002.nc',
'20130313_20130320/20130315T200217/201303152002_201303152011.nc',
'20130313_20130320/20130315T201305/201303152013_201303152027.nc',
'20130313_20130320/20130315T202717/201303152027_201303152201.nc',
'20130313_20130320/20130315T202717/201303152027_201303160254.nc',
'20130313_20130320/20130315T202717/201303152201_201303160004.nc',
'20130313_20130320/20130315T202717/201303160004_201303160651.nc',
'20130313_20130320/20130315T202717/201303160651_201303160953.nc',
'20130313_20130320/20130316T095712/201303160957_201303161025.nc',
'20130313_20130320/20130316T102632/201303161026_201303161040.nc',
'20130313_20130320/20130316T104017/201303161040_201303161302.nc',
'20130313_20130320/20130316T104017/201303161040_201303161529.nc',
'20130313_20130320/20130316T104017/201303161302_201303162011.nc',
'20130313_20130320/20130316T104017/201303162011_201303170333.nc',
'20130313_20130320/20130317T033239/201303170332_201303170602.nc',
'20130313_20130320/20130317T033239/201303170332_201303170608.nc',
'20130313_20130320/20130317T033239/201303170602_201303170608.nc',
'20130313_20130320/20130317T061040/201303170610_201303170639.nc',
'20130313_20130320/20130317T064112/201303170641_201303170646.nc',
'20130313_20130320/20130317T064639/201303170646_201303170802.nc',
'20130313_20130320/20130317T064639/201303170646_201303170944.nc',
'20130313_20130320/20130317T064639/201303170802_201303171511.nc',
'20130313_20130320/20130317T064639/201303171511_201303171828.nc',
'20130313_20130320/20130317T183135/201303171831_201303171849.nc',
'20130313_20130320/20130317T185106/201303171851_201303171856.nc',
'20130313_20130320/20130317T185723/201303171857_201303171956.nc',
'20130313_20130320/20130317T185723/201303171857_201303172006.nc',
'20130313_20130320/20130317T185723/201303172006_201303172034.nc',
'20130313_20130320/20130317T203717/201303172037_201303172051.nc',
'20130313_20130320/20130317T205336/201303172053_201303172058.nc',
'20130313_20130320/20130317T205906/201303172059_201303172202.nc',
'20130313_20130320/20130317T205906/201303172059_201303172244.nc',
'20130313_20130320/20130317T205906/201303172202_201303180512.nc',
'20130313_20130320/20130317T205906/201303180512_201303180702.nc',
'20130313_20130320/20130318T070527/201303180705_201303180731.nc',
'20130313_20130320/20130318T073303/201303180733_201303180742.nc',
'20130313_20130320/20130318T074256/201303180743_201303180902.nc',
'20130313_20130320/20130318T074256/201303180743_201303180903.nc',
'20130313_20130320/20130318T074256/201303180903_201303181606.nc',
'20130313_20130320/20130318T074256/201303181606_201303182352.nc',
'20130313_20130320/20130318T074256/201303182352_201303190101.nc',
'20130313_20130320/20130318T074256/201303190101_201303190235.nc',
'20130313_20130320/20130319T023834/201303190238_201303190257.nc',
'20130313_20130320/20130319T025944/201303190300_201303190302.nc',
'20130313_20130320/20130319T030324/201303190303_201303190703.nc',
'20130313_20130320/20130319T030324/201303190303_201303190721.nc',
'20130313_20130320/20130319T030324/201303190703_201303190817.nc',
'20130313_20130320/20130319T081955/201303190820_201303190845.nc',
'20130313_20130320/20130319T084718/201303190847_201303190849.nc',
'20130313_20130320/20130319T085014/201303190850_201303191101.nc',
'20130313_20130320/20130319T085014/201303190850_201303192307.nc',
'20130313_20130320/20130319T085014/201303191101_201303191804.nc',
'20130313_20130320/20130319T085014/201303191804_201303192307.nc',
'20130313_20130320/20130319T231047/201303192311_201303192333.nc',
'20130313_20130320/20130319T233504/201303192335_201303200004.nc',
'20130313_20130320/20130320T000452/201303200005_201303200056.nc',
'20130313_20130320/20130320T005923/201303200059_201303200132.nc',
'20130313_20130320/20130320T013358/201303200134_201303200136.nc',
'20130313_20130320/20130320T014500/201303200145_201303200203.nc',
'20130313_20130320/20130320T014500/201303200145_201303200228.nc',
'20130313_20130320/20130320T014500/201303200203_201303200916.nc',
'20130313_20130320/20130320T091648/201303200918_201303201726.nc',
'20130313_20130320/20130320T172551/201303201726_201303201854.nc',
'20130321_20130325/20130321T220027/201303212200_201303220305.nc',
'20130321_20130325/20130321T220027/201303220305_201303220547.nc',
'20130321_20130325/20130322T054706/201303220547_201303220804.nc',
'20130321_20130325/20130322T054706/201303220804_201303221510.nc',
'20130321_20130325/20130322T054706/201303221510_201303222301.nc',
'20130321_20130325/20130322T054706/201303222301_201303230404.nc',
'20130321_20130325/20130322T054706/201303230404_201303231114.nc',
'20130321_20130325/20130322T054706/201303231114_201303231852.nc',
'20130321_20130325/20130322T054706/201303231852_201303240302.nc',
'20130321_20130325/20130322T054706/201303240302_201303241003.nc',
'20130321_20130325/20130322T054706/201303241003_201303241732.nc',
'20130321_20130325/20130322T054706/201303241732_201303250203.nc',
'20130321_20130325/20130322T054706/201303250203_201303250902.nc',
'20130321_20130325/20130322T054706/201303250902_201303251600.nc',
'20130321_20130325/20130325T155211/201303251552_201303252211.nc',
]
tethys_d_parms = [ 'sea_water_temperature', 'sea_water_salinity', 'sea_water_density', 'volume_scattering_470_nm',
'volume_scattering_650_nm', 'mass_concentration_of_oxygen_in_sea_water', 'mole_concentration_of_nitrate_in_sea_water',
'mass_concentration_of_chlorophyll_in_sea_water']
# Binned Tethys data
tethys_b_base = 'http://odss.mbari.org/thredds/dodsC/CANON_march2013/lrauv/tethys/'
tethys_b_files = ['Tethys_ECOHAB_March2013.nc']
tethys_b_parms = ['temperature', 'salinity', 'chlorophyll', 'bb470', 'bb650']
cl.tethys_base = tethys_b_base
cl.tethys_files = tethys_b_files
cl.tethys_parms = tethys_b_parms
# Webb gliders
cl.hehape_base = cl.dodsBase + 'CANON_march2013/usc_glider/HeHaPe/processed/'
cl.hehape_files = [
'OS_Glider_HeHaPe_20130305_TS.nc',
'OS_Glider_HeHaPe_20130310_TS.nc',
]
cl.hehape_parms = [ 'TEMP', 'PSAL', 'BB532', 'CDOM', 'CHLA', 'DENS' ]
cl.rusalka_base = cl.dodsBase + 'CANON_march2013/usc_glider/Rusalka/processed/'
cl.rusalka_files = [
'OS_Glider_Rusalka_20130301_TS.nc',
]
cl.rusalka_parms = [ 'TEMP', 'PSAL', 'BB532', 'CDOM', 'CHLA', 'DENS' ]
# Spray glider - for just the duration of the campaign
cl.l_662_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/Line66/'
cl.l_662_files = ['OS_Glider_L_662_20120816_TS.nc']
cl.l_662_parms = ['TEMP', 'PSAL', 'FLU2']
cl.l_662_startDatetime = datetime.datetime(2012, 9, 10)
cl.l_662_endDatetime = datetime.datetime(2012, 9, 20)
# MBARI ESPs Mack and Bruce
cl.espmack_base = cl.dodsBase + 'CANON_march2013/esp/instances/Mack/data/processed/'
cl.espmack_files = [
'ctd.nc',
]
cl.espmack_parms = [ 'TEMP', 'PSAL', 'chl', 'chlini', 'no3' ]
# Rachel Carson Underway CTD
cl.rcuctd_base = cl.dodsBase + 'CANON_march2013/carson/uctd/'
cl.rcuctd_files = [
'07413plm01.nc', '07513plm02.nc', '07613plm03.nc', '07913plm04.nc',
'08013plm05.nc', '08113plm06.nc',
]
cl.rcuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ]
# Rachel Carson Profile CTD
cl.pctdDir = 'CANON_march2013/carson/pctd/'
cl.rcpctd_base = cl.dodsBase + cl.pctdDir
cl.rcpctd_files = [
'07413c01.nc', '07413c02.nc', '07413c03.nc', '07413c04.nc', '07413c05.nc', '07413c06.nc', '07413c07.nc',
'07413c08.nc', '07413c09.nc', '07413c10.nc', '07413c11.nc', '07513c12.nc', '07513c13.nc', '07513c14.nc',
'07513c15.nc', '07513c16.nc', '07513c17.nc', '07513c18.nc', '07513c19.nc', '07613c20.nc', '07613c21.nc',
'07613c22.nc', '07613c23.nc', '07613c24.nc', '07613c25.nc', '07613c26.nc', '07913c27.nc', '07913c28.nc',
'07913c29.nc', '07913c30.nc', '07913c31.nc', '08013c32.nc', '08013c33.nc', '08013c34.nc', '08013c35.nc',
'08013c36.nc', '08113c37.nc', '08113c38.nc', '08113c39.nc', '08113c40.nc', '08113c41.nc', '08113c42.nc',
'08113c43.nc',
]
cl.rcpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'ecofl', 'oxygen' ]
# Spray glider - for just the duration of the campaign
##cl.l_662_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/Line66/'
##cl.l_662_files = ['OS_Glider_L_662_20120816_TS.nc']
##cl.l_662_parms = ['TEMP', 'PSAL', 'FLU2']
##cl.l_662_startDatetime = datetime.datetime(2012, 9, 1)
##cl.l_662_endDatetime = datetime.datetime(2012, 9, 21)
# Execute the load
cl.process_command_line()

if cl.args.test:
    # --test: heavily strided loads for a quick smoke test of the pipeline
    cl.loadDorado(stride=10)
    cl.loadLRAUV('daphne', stride=10, build_attrs=False)
    cl.loadLRAUV('tethys', stride=10, build_attrs=False)
    ##cl.loadESPmack()
    ##cl.loadESPbruce()
    cl.loadRCuctd(stride=2)
    cl.loadRCpctd(stride=1)
    ##cl.loadHeHaPe()
    ##cl.loadRusalka()
    ##cl.loadYellowfin()
elif cl.args.optimal_stride:
    # --optimal_stride: moderate strides balancing fidelity and load time
    cl.loadDorado(stride=2)
    cl.loadLRAUV('daphne', stride=2, build_attrs=False)
    cl.loadLRAUV('tethys', stride=2, build_attrs=False)
    ##cl.loadESPmack()
    ##cl.loadESPbruce()
    cl.loadRCuctd(stride=1)
    cl.loadRCpctd(stride=1)
    ##cl.loadHeHaPe(stride=10) # As of 3/18/2013 - Bad Lat & Lon
    ##cl.loadRusalka(stride=10) # As of 3/18/2013 - no good data in file http://zuma.rc.mbari.org/thredds/dodsC/CANON_march2013/usc_glider/Rusalka/processed/OS_Glider_Rusalka_20130301_TS.nc.html
    ##cl.loadYellowfin()
else:
    # default: full-resolution load using the stride given on the command line
    cl.stride = cl.args.stride
    cl.loadDorado()
    cl.loadLRAUV('daphne', build_attrs=False)
    cl.loadLRAUV('tethys', build_attrs=False)
    ##cl.loadESPmack()
    ##cl.loadESPbruce()
    cl.loadRCuctd()
    cl.loadRCpctd()
    ##cl.loadHeHaPe()
    ##cl.loadRusalka()
    ##cl.loadYellowfin()

# Add any X3D Terrain information specified in the constructor to the database
cl.addTerrainResources()

print("All done.")
|
duane-edgington/stoqs
|
stoqs/loaders/CANON/loadCANON_march2013.py
|
Python
|
gpl-3.0
| 25,731
|
[
"NetCDF"
] |
a50c1d1f284206959cbfee05ff387da7dcd752c9ee9f44f0e605649e96bf8c97
|
from datetime import datetime
from flask import Flask, session
from flask.ext.mosession import MoSessionExtension
app = Flask(__name__)
app.config['MONGODB_HOST'] = '127.0.0.1'
app.config['MONGODB_PORT'] = 27017
app.config['MONGODB_DATABASE'] = 'session_test_db'
mosession = MoSessionExtension(app)
@app.route("/")
def hello():
    """Greet the visitor with the timestamp of their first visit."""
    if 'first_visit' not in session:
        # First request from this client: remember when it happened.
        session['first_visit'] = datetime.now()
    first_visit = session['first_visit']
    return 'Hi dear, your first visit was on ' + str(first_visit)
# Start Flask's built-in development server when run as a script.
if __name__ == '__main__':
    app.run()
|
bayazee/flask-mosession
|
example/example.py
|
Python
|
bsd-3-clause
| 538
|
[
"VisIt"
] |
dc5b3140ccb0abebc03724041375c2efcc28dad789076058ead6b8cb92080052
|
# inspired by some code by Nathan Denny (1999)
# see http://www.ece.arizona.edu/~denny/python_nest/graph_lib_1.0.1.html
try:
# use reduce against BDFL's will even on python > 2.6
from functools import reduce
except ImportError:
pass
class GraphException(Exception):
    """Base class for all exceptions raised by the graph package."""
class GraphTopologicalException(GraphException):
    """Raised by a topological sort when the graph contains a cycle."""
def is_sequence(x):
    """Return True when *x* is a plain sequence (a list or a tuple)."""
    return isinstance(x, list) or isinstance(x, tuple)
def recursive_map(func, seq):
    """Apply a function recursively on a sequence and all subsequences.

    Returns a nested list mirroring the structure of `seq`, with `func`
    applied to every non-sequence leaf.
    """
    def _func(x):
        if is_sequence(x):
            return recursive_map(func, x)
        else:
            return func(x)
    # Bug fix: return a concrete list instead of a lazy `map` object.
    # On Python 3 a map iterator fails the is_sequence() check, which broke
    # nested processing in recursive_reduce()/Graph.add_tree(), and the
    # result could only be consumed once.
    return [_func(item) for item in seq]
def recursive_reduce(func, seq, *argv):
    """Apply reduce(func, seq) recursively to a sequence and all its
    subsequences."""
    def _combine(acc, item):
        # recurse into subsequences before folding them in
        if is_sequence(item):
            return func(acc, recursive_reduce(func, item))
        return func(acc, item)
    return reduce(_combine, seq, *argv)
class GraphNode(object):
    """Represent a graph node and all information attached to it."""

    def __init__(self, data=None):
        # arbitrary payload attached to the node
        self.data = data
        # edges in (edges that end at this node)
        self.ein = []
        # edges out (edges that start at this node)
        self.eout = []

    def add_edge_in(self, edge):
        self.ein.append(edge)

    def add_edge_out(self, edge):
        self.eout.append(edge)

    def remove_edge_in(self, edge):
        self.ein.remove(edge)

    def remove_edge_out(self, edge):
        self.eout.remove(edge)

    def get_edges_in(self, from_ = None):
        """Return a copy of the list of the entering edges. If from_
        is specified, return only the edges coming from that node."""
        inedges = self.ein[:]
        if from_:
            inedges = [edge for edge in inedges if edge.head == from_]
        return inedges

    def get_edges_out(self, to_ = None):
        """Return a copy of the list of the outgoing edges. If to_
        is specified, return only the edges going to that node."""
        outedges = self.eout[:]
        if to_:
            outedges = [edge for edge in outedges if edge.tail == to_]
        return outedges

    def get_edges(self, neighbor = None):
        """Return a copy of all edges. If neighbor is specified, return
        only the edges connected to that node."""
        return ( self.get_edges_in(from_=neighbor) +
                 self.get_edges_out(to_=neighbor) )

    def in_degree(self):
        """Return the number of entering edges."""
        return len(self.ein)

    def out_degree(self):
        """Return the number of outgoing edges."""
        return len(self.eout)

    def degree(self):
        """Return the total number of edges."""
        return self.in_degree()+self.out_degree()

    def in_neighbors(self):
        """Return the neighbors down in-edges (i.e. the parent nodes)."""
        # Bug fix: return a list rather than a lazy `map` object. On
        # Python 3, neighbors() added two map objects together, which
        # raises TypeError, and the iterators could be consumed only once.
        return [edge.get_head() for edge in self.ein]

    def out_neighbors(self):
        """Return the neighbors down out-edges (i.e. the child nodes)."""
        return [edge.get_tail() for edge in self.eout]

    def neighbors(self):
        """Return all neighboring nodes (parents followed by children)."""
        return self.in_neighbors() + self.out_neighbors()
class GraphEdge(object):
    """Represent a directed graph edge and all information attached to it."""

    def __init__(self, head, tail, data=None):
        self.head = head  # node the edge starts from
        self.tail = tail  # node the edge points to
        self.data = data  # arbitrary payload attached to the edge

    def get_ends(self):
        """Return the tuple (head_id, tail_id)."""
        return (self.head, self.tail)

    def get_tail(self):
        return self.tail

    def get_head(self):
        return self.head
class Graph(object):
"""Represent a directed graph."""
def __init__(self):
# list of nodes
self.nodes = []
# list of edges
self.edges = []
# node functions
def add_node(self, data=None):
    """Create a new node holding `data`, register it, and return it."""
    new_node = GraphNode(data=data)
    self.nodes.append(new_node)
    return new_node
def remove_node(self, node):
# the node is not in this graph
if node not in self.nodes:
errstr = 'This node is not part of the graph (%s)' % str(node_id)
raise GraphException(errstr)
# remove all edges containing this node
for edge in node.get_edges():
self.remove_edge(edge)
# remove the node
self.nodes.remove(node)
# edge functions
def add_edge(self, head, tail, data=None):
"""Add an edge going from head to tail.
head : head node
tail : tail node
"""
# create edge
edge = GraphEdge(head, tail, data=data)
# add edge to head and tail node
head.add_edge_out(edge)
tail.add_edge_in(edge)
# add to the edges dictionary
self.edges.append(edge)
return edge
def remove_edge(self, edge):
head, tail = edge.get_ends()
# remove from head
head.remove_edge_out(edge)
# remove from tail
tail.remove_edge_in(edge)
# remove the edge
self.edges.remove(edge)
### populate functions
def add_nodes(self, data):
"""Add many nodes at once.
data -- number of nodes to add or sequence of data values, one for
each new node"""
if not is_sequence(data):
data = [None]*data
return map(self.add_node, data)
def add_tree(self, tree):
"""Add a tree to the graph.
The tree is specified with a nested list of tuple, in a LISP-like
notation. The values specified in the list become the values of
the single nodes.
Return an equivalent nested list with the nodes instead of the values.
Example:
>>> a=b=c=d=e=None
>>> g.add_tree( (a, b, (c, d ,e)) )
corresponds to this tree structure, with all node values set to None:
a
/ \
b c
/ \
d e
"""
def _add_edge(root, son):
self.add_edge(root, son)
return root
nodes = recursive_map(self.add_node, tree)
recursive_reduce(_add_edge, nodes)
return nodes
def add_full_connectivity(self, from_nodes, to_nodes):
"""Add full connectivity from a group of nodes to another one.
Return a list of lists of edges, one for each node in 'from_nodes'.
Example: create a two-layer graph with full connectivity.
>>> g = Graph()
>>> layer1 = g.add_nodes(10)
>>> layer2 = g.add_nodes(5)
>>> g.add_full_connectivity(layer1, layer2)
"""
edges = []
for from_ in from_nodes:
edges.append(map(lambda x: self.add_edge(from_, x), to_nodes))
return edges
###### graph algorithms
def topological_sort(self):
"""Perform a topological sort of the nodes. If the graph has a cycle,
throw a GraphTopologicalException with the list of successfully
ordered nodes."""
# topologically sorted list of the nodes (result)
topological_list = []
# queue (fifo list) of the nodes with in_degree 0
topological_queue = []
# {node: in_degree} for the remaining nodes (those with in_degree>0)
remaining_indegree = {}
# init queues and lists
for node in self.nodes:
indegree = node.in_degree()
if indegree == 0:
topological_queue.append(node)
else:
remaining_indegree[node] = indegree
# remove nodes with in_degree 0 and decrease the in_degree of their sons
while len(topological_queue):
# remove the first node with degree 0
node = topological_queue.pop(0)
topological_list.append(node)
# decrease the in_degree of the sons
for son in node.out_neighbors():
remaining_indegree[son] -= 1
if remaining_indegree[son] == 0:
topological_queue.append(son)
# if not all nodes were covered, the graph must have a cycle
# raise a GraphTopographicalException
if len(topological_list)!=len(self.nodes):
raise GraphTopologicalException(topological_list)
return topological_list
### Depth-First sort
def _dfs(self, neighbors_fct, root, visit_fct=None):
# core depth-first sort function
# changing the neighbors function to return the sons of a node,
# its parents, or both one gets normal dfs, reverse dfs, or
# dfs on the equivalent undirected graph, respectively
# result list containing the nodes in Depth-First order
dfs_list = []
# keep track of all already visited nodes
visited_nodes = { root: None }
# stack (lifo) list
dfs_stack = []
dfs_stack.append(root)
while len(dfs_stack):
# consider the next node on the stack
node = dfs_stack.pop()
dfs_list.append(node)
# visit the node
if visit_fct != None:
visit_fct(node)
# add all sons to the stack (if not already visited)
for son in neighbors_fct(node):
if son not in visited_nodes:
visited_nodes[son] = None
dfs_stack.append(son)
return dfs_list
def dfs(self, root, visit_fct=None):
"""Return a list of nodes in some Depth First order starting from
a root node. If defined, visit_fct is applied on each visited node.
The returned list does not have to contain all nodes in the
graph, but only the ones reachable from the root.
"""
neighbors_fct = lambda node: node.out_neighbors()
return self._dfs(neighbors_fct, root, visit_fct=visit_fct)
def undirected_dfs(self, root, visit_fct=None):
"""Perform Depth First sort.
This function is identical to dfs, but the sort is performed on
the equivalent undirected version of the graph."""
neighbors_fct = lambda node: node.neighbors()
return self._dfs(neighbors_fct, root, visit_fct=visit_fct)
### Connected components
def connected_components(self):
"""Return a list of lists containing the nodes of all connected
components of the graph."""
visited = {}
def visit_fct(node, visited=visited):
visited[node] = None
components = []
nodes = self.nodes
for node in nodes:
if node in visited:
continue
components.append(self.undirected_dfs(node, visit_fct))
return components
def is_weakly_connected(self):
"""Return True if the graph is weakly connected."""
return len(self.undirected_dfs(self.nodes[0]))==len(self.nodes)
### Breadth-First Sort
# BFS and DFS could be generalized to one function. I leave them
# distinct for clarity.
def _bfs(self, neighbors_fct, root, visit_fct=None):
# core breadth-first sort function
# changing the neighbors function to return the sons of a node,
# its parents, or both one gets normal bfs, reverse bfs, or
# bfs on the equivalent undirected graph, respectively
# result list containing the nodes in Breadth-First order
bfs_list = []
# keep track of all already visited nodes
visited_nodes = { root: None }
# queue (fifo) list
bfs_queue = []
bfs_queue.append(root)
while len(bfs_queue):
# consider the next node in the queue
node = bfs_queue.pop(0)
bfs_list.append(node)
# visit the node
if visit_fct != None:
visit_fct(node)
# add all sons to the queue (if not already visited)
for son in neighbors_fct(node):
if son not in visited_nodes:
visited_nodes[son] = None
bfs_queue.append(son)
return bfs_list
def bfs(self, root, visit_fct=None):
"""Return a list of nodes in some Breadth First order starting from
a root node. If defined, visit_fct is applied on each visited node.
Note the returned list does not have to contain all nodes in the
graph, but only the ones reachable from the root."""
neighbors_fct = lambda node: node.out_neighbors()
return self._bfs(neighbors_fct, root, visit_fct=visit_fct)
def undirected_bfs(self, root, visit_fct=None):
"""Perform Breadth First sort.
This function is identical to bfs, but the sort is performed on
the equivalent undirected version of the graph."""
neighbors_fct = lambda node: node.neighbors()
return self._bfs(neighbors_fct, root, visit_fct=visit_fct)
|
arnaudsj/mdp-toolkit
|
mdp/graph/graph.py
|
Python
|
bsd-3-clause
| 13,020
|
[
"VisIt"
] |
ae0d4f6f68b0425697e95bd22072b01e39d9248da07016e1977d06e8e1dd7a79
|
"""Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos (alexandre.tp@gmail.com)
# Bertrand Thirion <bertrand.thirion@inria.fr>
#
# Based on mixture.py by:
# Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp, pinvh, squared_norm
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import _GMMBase
@deprecated("The function digamma is deprecated in 0.18 and "
            "will be removed in 0.20. Use scipy.special.digamma instead.")
def digamma(x):
    # Shift by a tiny epsilon so that digamma is finite at zero.
    eps = np.finfo(np.float32).eps
    return _digamma(x + eps)
@deprecated("The function gammaln is deprecated in 0.18 and "
            "will be removed in 0.20. Use scipy.special.gammaln instead.")
def gammaln(x):
    # Shift by a tiny epsilon so that gammaln is finite at zero.
    eps = np.finfo(np.float32).eps
    return _gammaln(x + eps)
@deprecated("The function log_normalize is deprecated in 0.18 and "
            "will be removed in 0.20.")
def log_normalize(v, axis=0):
    """Normalized probabilities from unnormalized log-probabilites"""
    # Bring the normalization axis to the front and work on a copy.
    work = np.rollaxis(v, axis).copy()
    # Subtract the per-column maximum for numerical stability.
    work -= work.max(axis=0)
    # NOTE: logsumexp here is sklearn's extmath version (axis=0 default).
    probs = np.exp(work - logsumexp(work))
    # Avoid exact zeros before renormalizing.
    probs += np.finfo(np.float32).eps
    probs /= np.sum(probs, axis=0)
    return np.swapaxes(probs, 0, axis)
@deprecated("The function wishart_log_det is deprecated in 0.18 and "
            "will be removed in 0.20.")
def wishart_log_det(a, b, detB, n_features):
    """Expected value of the log of the determinant of a Wishart

    The expected value of the logarithm of the determinant of a
    wishart-distributed random variable with the specified parameters."""
    half_dofs = 0.5 * (a - np.arange(-1, n_features - 1))
    result = np.sum(digamma(half_dofs))
    result += n_features * np.log(2)
    return result + detB
@deprecated("The function wishart_logz is deprecated in 0.18 and "
            "will be removed in 0.20.")
def wishart_logz(v, s, dets, n_features):
    "The logarithm of the normalization constant for the wishart distribution"
    # Accumulate the four additive terms in the same order as before.
    z = 0.
    z += 0.5 * v * n_features * np.log(2)
    z += 0.25 * (n_features * (n_features - 1)) * np.log(np.pi)
    z += 0.5 * v * np.log(dets)
    z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
    return z
def _bound_wishart(a, B, detB):
    """Returns a function of the dof, scale matrix and its determinant
    used as an upper bound in variational approximation of the evidence"""
    n_features = B.shape[0]
    # log-normalizer of Wishart(a, B) relative to the reference
    # Wishart(n_features, I)
    norm_term = wishart_logz(a, B, detB, n_features)
    norm_term -= wishart_logz(n_features,
                              np.identity(n_features),
                              1, n_features)
    # expectation terms under the variational distribution
    logdet_term = 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
    trace_term = 0.5 * a * np.trace(B)
    return norm_term + logdet_term + trace_term
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
@deprecated("The DPGMM class is not working correctly and it's better "
            "to not use it. DPGMM is deprecated in 0.18 and "
            "will be removed in 0.20.")
class DPGMM(_GMMBase):
    """Variational Inference for the Infinite Gaussian Mixture Model.

    DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
    is an infinite mixture model with the Dirichlet Process as a prior
    distribution on the number of clusters. In practice the
    approximate inference algorithm uses a truncated distribution with
    a fixed maximum number of components, but almost always the number
    of components actually used depends on the data.

    Stick-breaking Representation of a Gaussian mixture model
    probability distribution. This class allows for easy and efficient
    inference of an approximate posterior distribution over the
    parameters of a Gaussian mixture model with a variable number of
    components (smaller than the truncation parameter n_components).

    Initialization is with normally-distributed means and identity
    covariance, for proper convergence.

    Read more in the :ref:`User Guide <dpgmm>`.

    Parameters
    ----------
    n_components: int, default 1
        Number of mixture components.

    covariance_type: string, default 'diag'
        String describing the type of covariance parameters to
        use.  Must be one of 'spherical', 'tied', 'diag', 'full'.

    alpha: float, default 1
        Real number representing the concentration parameter of
        the dirichlet process. Intuitively, the Dirichlet Process
        is as likely to start a new cluster for a point as it is
        to add that point to a cluster with alpha elements. A
        higher alpha means more clusters, as the expected number
        of clusters is ``alpha*log(N)``.

    tol : float, default 1e-3
        Convergence threshold.

    n_iter : int, default 10
        Maximum number of iterations to perform before convergence.

    params : string, default 'wmc'
        Controls which parameters are updated in the training
        process.  Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars.

    init_params : string, default 'wmc'
        Controls which parameters are updated in the initialization
        process.  Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars.  Defaults to 'wmc'.

    verbose : int, default 0
        Controls output verbosity.

    Attributes
    ----------
    covariance_type : string
        String describing the type of covariance parameters used by
        the DP-GMM.  Must be one of 'spherical', 'tied', 'diag', 'full'.

    n_components : int
        Number of mixture components.

    weights_ : array, shape (`n_components`,)
        Mixing weights for each mixture component.

    means_ : array, shape (`n_components`, `n_features`)
        Mean parameters for each mixture component.

    precs_ : array
        Precision (inverse covariance) parameters for each mixture
        component. The shape depends on `covariance_type`::

            (`n_components`, 'n_features')                if 'spherical',
            (`n_features`, `n_features`)                  if 'tied',
            (`n_components`, `n_features`)                if 'diag',
            (`n_components`, `n_features`, `n_features`)  if 'full'

    converged_ : bool
        True when convergence was reached in fit(), False otherwise.

    See Also
    --------
    GMM : Finite Gaussian mixture model fit with EM

    VBGMM : Finite Gaussian mixture model fit with a variational
        algorithm, better for situations where there might be too little
        data to get a good estimate of the covariance matrix.
    """

    def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
                 random_state=None, tol=1e-3, verbose=0, min_covar=None,
                 n_iter=10, params='wmc', init_params='wmc'):
        self.alpha = alpha
        super(DPGMM, self).__init__(n_components, covariance_type,
                                    random_state=random_state,
                                    tol=tol, min_covar=min_covar,
                                    n_iter=n_iter, params=params,
                                    init_params=init_params, verbose=verbose)

    def _get_precisions(self):
        """Return precisions as a full matrix."""
        if self.covariance_type == 'full':
            return self.precs_
        elif self.covariance_type in ['diag', 'spherical']:
            # expand per-feature precisions into diagonal matrices
            return [np.diag(cov) for cov in self.precs_]
        elif self.covariance_type == 'tied':
            # one shared precision matrix replicated per component
            return [self.precs_] * self.n_components

    def _get_covars(self):
        # covariances are the (pseudo-)inverses of the precisions
        return [pinvh(c) for c in self._get_precisions()]

    def _set_covars(self, covars):
        raise NotImplementedError("""The variational algorithm does
        not support setting the covariance parameters.""")

    def score_samples(self, X):
        """Return the likelihood of the data under the model.

        Compute the bound on log probability of X under the model
        and return the posterior distribution (responsibilities) of
        each mixture component for each element of X.

        This is done by computing the parameters for the mean-field of
        z for each observation.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X

        responsibilities: array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        check_is_fitted(self, 'gamma_')

        X = check_array(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        z = np.zeros((X.shape[0], self.n_components))
        # Expected log stick-breaking proportions:
        # dgamma1 = E[log V_k], dgamma2 accumulates E[log(1 - V_j)] for j<k.
        sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
        dgamma1 = digamma(self.gamma_.T[1]) - sd
        dgamma2 = np.zeros(self.n_components)
        dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
                                                          self.gamma_[0, 2])
        for j in range(1, self.n_components):
            dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
            dgamma2[j] -= sd[j - 1]
        dgamma = dgamma1 + dgamma2
        # Free memory and developers cognitive load:
        del dgamma1, dgamma2, sd

        if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
            raise NotImplementedError("This ctype is not implemented: %s"
                                      % self.covariance_type)
        p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
                                 self.precs_, self.means_,
                                 self.covariance_type)

        # responsibilities: likelihood bound plus expected log proportions,
        # normalized over components
        z = p + dgamma
        z = log_normalize(z, axis=-1)
        bound = np.sum(z * p, axis=-1)
        return bound, z

    def _update_concentration(self, z):
        """Update the concentration parameters for each cluster"""
        sz = np.sum(z, axis=0)
        self.gamma_.T[1] = 1. + sz
        self.gamma_.T[2].fill(0)
        # gamma_[i, 2] accumulates the responsibility mass of all later
        # components (stick-breaking tail), computed right-to-left
        for i in range(self.n_components - 2, -1, -1):
            self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
        self.gamma_.T[2] += self.alpha

    def _update_means(self, X, z):
        """Update the variational distributions for the means"""
        n_features = X.shape[1]
        for k in range(self.n_components):
            if self.covariance_type in ['spherical', 'diag']:
                num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
                num *= self.precs_[k]
                den = 1. + self.precs_[k] * np.sum(z.T[k])
                self.means_[k] = num / den
            elif self.covariance_type in ['tied', 'full']:
                if self.covariance_type == 'tied':
                    cov = self.precs_
                else:
                    cov = self.precs_[k]
                den = np.identity(n_features) + cov * np.sum(z.T[k])
                num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
                num = np.dot(cov, num)
                # solve den * means_[k] = num in the least-squares sense
                self.means_[k] = linalg.lstsq(den, num)[0]

    def _update_precisions(self, X, z):
        """Update the variational distributions for the precisions"""
        n_features = X.shape[1]
        if self.covariance_type == 'spherical':
            self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
            for k in range(self.n_components):
                # could be more memory efficient ?
                sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
                self.scale_[k] = 1.
                self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
                self.bound_prec_[k] = (
                    0.5 * n_features * (
                        digamma(self.dof_[k]) - np.log(self.scale_[k])))
            self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T

        elif self.covariance_type == 'diag':
            for k in range(self.n_components):
                self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
                sq_diff = (X - self.means_[k]) ** 2  # see comment above
                self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
                    z.T[k], (sq_diff + 1))
                self.precs_[k] = self.dof_[k] / self.scale_[k]
                self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
                                                   - np.log(self.scale_[k]))
                self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])

        elif self.covariance_type == 'tied':
            self.dof_ = 2 + X.shape[0] + n_features
            self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
            for k in range(self.n_components):
                diff = X - self.means_[k]
                self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
            self.scale_ = pinvh(self.scale_)
            self.precs_ = self.dof_ * self.scale_
            self.det_scale_ = linalg.det(self.scale_)
            self.bound_prec_ = 0.5 * wishart_log_det(
                self.dof_, self.scale_, self.det_scale_, n_features)
            self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)

        elif self.covariance_type == 'full':
            for k in range(self.n_components):
                sum_resp = np.sum(z.T[k])
                self.dof_[k] = 2 + sum_resp + n_features
                self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
                diff = X - self.means_[k]
                self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
                self.scale_[k] = pinvh(self.scale_[k])
                self.precs_[k] = self.dof_[k] * self.scale_[k]
                self.det_scale_[k] = linalg.det(self.scale_[k])
                self.bound_prec_[k] = 0.5 * wishart_log_det(
                    self.dof_[k], self.scale_[k], self.det_scale_[k],
                    n_features)
                self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
                    self.scale_[k])

    def _monitor(self, X, z, n, end=False):
        """Monitor the lower bound during iteration

        Debug method to help see exactly when it is failing to converge as
        expected.

        Note: this is very expensive and should not be used by default."""
        if self.verbose > 0:
            print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
            if end:
                print("Cluster proportions:", self.gamma_.T[1])
                print("covariance_type:", self.covariance_type)

    def _do_mstep(self, X, z, params):
        """Maximize the variational lower bound

        Update each of the parameters to maximize the lower bound."""
        self._monitor(X, z, "z")
        # NOTE: the concentration is always updated, regardless of 'params'
        self._update_concentration(z)
        self._monitor(X, z, "gamma")
        if 'm' in params:
            self._update_means(X, z)
        self._monitor(X, z, "mu")
        if 'c' in params:
            self._update_precisions(X, z)
        self._monitor(X, z, "a and b", end=True)

    def _initialize_gamma(self):
        "Initializes the concentration parameters"
        self.gamma_ = self.alpha * np.ones((self.n_components, 3))

    def _bound_concentration(self):
        """The variational lower bound for the concentration parameter."""
        logprior = gammaln(self.alpha) * self.n_components
        logprior += np.sum((self.alpha - 1) * (
            digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
                                                self.gamma_.T[2])))
        logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
        logprior += np.sum(gammaln(self.gamma_.T[1]) +
                           gammaln(self.gamma_.T[2]))
        logprior -= np.sum((self.gamma_.T[1] - 1) * (
            digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
                                                self.gamma_.T[2])))
        logprior -= np.sum((self.gamma_.T[2] - 1) * (
            digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
                                                self.gamma_.T[2])))
        return logprior

    def _bound_means(self):
        "The variational lower bound for the mean parameters"
        logprior = 0.
        logprior -= 0.5 * squared_norm(self.means_)
        logprior -= 0.5 * self.means_.shape[1] * self.n_components
        return logprior

    def _bound_precisions(self):
        """Returns the bound term related to precisions"""
        logprior = 0.
        if self.covariance_type == 'spherical':
            logprior += np.sum(gammaln(self.dof_))
            logprior -= np.sum(
                (self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
            logprior += np.sum(- np.log(self.scale_) + self.dof_
                               - self.precs_[:, 0])
        elif self.covariance_type == 'diag':
            logprior += np.sum(gammaln(self.dof_))
            logprior -= np.sum(
                (self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
            logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
        elif self.covariance_type == 'tied':
            logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
        elif self.covariance_type == 'full':
            for k in range(self.n_components):
                logprior += _bound_wishart(self.dof_[k],
                                           self.scale_[k],
                                           self.det_scale_[k])
        return logprior

    def _bound_proportions(self, z):
        """Returns the bound term related to proportions"""
        dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
        dg1 = digamma(self.gamma_.T[1]) - dg12
        dg2 = digamma(self.gamma_.T[2]) - dg12

        # cz[:, k] = sum of responsibilities of components after k
        cz = np.cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
        logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
        del cz  # Save memory
        # entropy of z, skipping (numerically) zero responsibilities
        z_non_zeros = z[z > np.finfo(np.float32).eps]
        logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
        return logprior

    def _logprior(self, z):
        logprior = self._bound_concentration()
        logprior += self._bound_means()
        logprior += self._bound_precisions()
        logprior += self._bound_proportions(z)
        return logprior

    def lower_bound(self, X, z):
        """returns a lower bound on model evidence based on X and membership"""
        check_is_fitted(self, 'means_')

        if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
            raise NotImplementedError("This ctype is not implemented: %s"
                                      % self.covariance_type)
        X = np.asarray(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
                                            self.bound_prec_, self.precs_,
                                            self.means_, self.covariance_type))

        return c + self._logprior(z)

    def _set_weights(self):
        # expected stick-breaking weights, renormalized over the truncation
        for i in xrange(self.n_components):
            self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
                                                    + self.gamma_[i, 2])
        self.weights_ /= np.sum(self.weights_)

    def _fit(self, X, y=None):
        """Estimate model parameters with the variational
        algorithm.

        For a full derivation and description of the algorithm see
        doc/modules/dp-derivation.rst
        or
        http://scikit-learn.org/stable/modules/dp-derivation.html

        A initialization step is performed before entering the em
        algorithm. If you want to avoid this step, set the keyword
        argument init_params to the empty string '' when when creating
        the object. Likewise, if you would like just to do an
        initialization, set n_iter=0.

        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        responsibilities : array, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation.
        """
        self.random_state_ = check_random_state(self.random_state)

        # initialization step
        X = check_array(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]

        n_samples, n_features = X.shape
        # start from uniform responsibilities
        z = np.ones((n_samples, self.n_components))
        z /= self.n_components

        # constant part of the per-sample log-likelihood bound
        self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
        self._initial_bound -= np.log(2 * np.pi * np.e)

        if (self.init_params != '') or not hasattr(self, 'gamma_'):
            self._initialize_gamma()

        if 'm' in self.init_params or not hasattr(self, 'means_'):
            self.means_ = cluster.KMeans(
                n_clusters=self.n_components,
                random_state=self.random_state_).fit(X).cluster_centers_[::-1]

        if 'w' in self.init_params or not hasattr(self, 'weights_'):
            self.weights_ = np.tile(1.0 / self.n_components, self.n_components)

        if 'c' in self.init_params or not hasattr(self, 'precs_'):
            if self.covariance_type == 'spherical':
                self.dof_ = np.ones(self.n_components)
                self.scale_ = np.ones(self.n_components)
                self.precs_ = np.ones((self.n_components, n_features))
                self.bound_prec_ = 0.5 * n_features * (
                    digamma(self.dof_) - np.log(self.scale_))
            elif self.covariance_type == 'diag':
                self.dof_ = 1 + 0.5 * n_features
                self.dof_ *= np.ones((self.n_components, n_features))
                self.scale_ = np.ones((self.n_components, n_features))
                self.precs_ = np.ones((self.n_components, n_features))
                self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
                                                 np.log(self.scale_), 1))
                self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
            elif self.covariance_type == 'tied':
                self.dof_ = 1.
                self.scale_ = np.identity(n_features)
                self.precs_ = np.identity(n_features)
                self.det_scale_ = 1.
                self.bound_prec_ = 0.5 * wishart_log_det(
                    self.dof_, self.scale_, self.det_scale_, n_features)
                self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
            elif self.covariance_type == 'full':
                self.dof_ = (1 + self.n_components + n_samples)
                self.dof_ *= np.ones(self.n_components)
                self.scale_ = [2 * np.identity(n_features)
                               for _ in range(self.n_components)]
                self.precs_ = [np.identity(n_features)
                               for _ in range(self.n_components)]
                self.det_scale_ = np.ones(self.n_components)
                self.bound_prec_ = np.zeros(self.n_components)
                for k in range(self.n_components):
                    self.bound_prec_[k] = wishart_log_det(
                        self.dof_[k], self.scale_[k], self.det_scale_[k],
                        n_features)
                    self.bound_prec_[k] -= (self.dof_[k] *
                                            np.trace(self.scale_[k]))
                self.bound_prec_ *= 0.5

        # EM algorithms
        current_log_likelihood = None
        # reset self.converged_ to False
        self.converged_ = False

        for i in range(self.n_iter):
            prev_log_likelihood = current_log_likelihood
            # Expectation step
            curr_logprob, z = self.score_samples(X)

            current_log_likelihood = (
                curr_logprob.mean() + self._logprior(z) / n_samples)

            # Check for convergence.
            if prev_log_likelihood is not None:
                change = abs(current_log_likelihood - prev_log_likelihood)
                if change < self.tol:
                    self.converged_ = True
                    break

            # Maximization step
            self._do_mstep(X, z, self.params)

        if self.n_iter == 0:
            # Need to make sure that there is a z value to output
            # Output zeros because it was just a quick initialization
            z = np.zeros((X.shape[0], self.n_components))

        self._set_weights()

        return z
@deprecated("The VBGMM class is not working correctly and it's better"
            " to not use it. VBGMM is deprecated in 0.18 and "
            "will be removed in 0.20.")
class VBGMM(DPGMM):
    """Variational Inference for the Gaussian Mixture Model

    Variational inference for a Gaussian mixture model probability
    distribution. This class allows for easy and efficient inference
    of an approximate posterior distribution over the parameters of a
    Gaussian mixture model with a fixed number of components.

    Initialization is with normally-distributed means and identity
    covariance, for proper convergence.

    Read more in the :ref:`User Guide <vbgmm>`.

    Parameters
    ----------
    n_components: int, default 1
        Number of mixture components.

    covariance_type: string, default 'diag'
        String describing the type of covariance parameters to
        use.  Must be one of 'spherical', 'tied', 'diag', 'full'.

    alpha: float, default 1
        Real number representing the concentration parameter of
        the dirichlet distribution. Intuitively, the higher the
        value of alpha the more likely the variational mixture of
        Gaussians model will use all components it can.

    tol : float, default 1e-3
        Convergence threshold.

    n_iter : int, default 10
        Maximum number of iterations to perform before convergence.

    params : string, default 'wmc'
        Controls which parameters are updated in the training
        process.  Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars.

    init_params : string, default 'wmc'
        Controls which parameters are updated in the initialization
        process.  Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars.  Defaults to 'wmc'.

    verbose : int, default 0
        Controls output verbosity.

    Attributes
    ----------
    covariance_type : string
        String describing the type of covariance parameters used by
        the DP-GMM.  Must be one of 'spherical', 'tied', 'diag', 'full'.

    n_features : int
        Dimensionality of the Gaussians.

    n_components : int (read-only)
        Number of mixture components.

    weights_ : array, shape (`n_components`,)
        Mixing weights for each mixture component.

    means_ : array, shape (`n_components`, `n_features`)
        Mean parameters for each mixture component.

    precs_ : array
        Precision (inverse covariance) parameters for each mixture
        component. The shape depends on `covariance_type`::

            (`n_components`, 'n_features')                if 'spherical',
            (`n_features`, `n_features`)                  if 'tied',
            (`n_components`, `n_features`)                if 'diag',
            (`n_components`, `n_features`, `n_features`)  if 'full'

    converged_ : bool
        True when convergence was reached in fit(), False
        otherwise.

    See Also
    --------
    GMM : Finite Gaussian mixture model fit with EM

    DPGMM : Infinite Gaussian mixture model, using the dirichlet
        process, fit with a variational algorithm
    """

    def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
                 random_state=None, tol=1e-3, verbose=0,
                 min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
        super(VBGMM, self).__init__(
            n_components, covariance_type, random_state=random_state,
            tol=tol, verbose=verbose, min_covar=min_covar,
            n_iter=n_iter, params=params, init_params=init_params)
        # a symmetric Dirichlet prior: total concentration 'alpha' is
        # shared evenly across the components
        self.alpha = float(alpha) / n_components

    def score_samples(self, X):
        """Return the likelihood of the data under the model.

        Compute the bound on log probability of X under the model
        and return the posterior distribution (responsibilities) of
        each mixture component for each element of X.

        This is done by computing the parameters for the mean-field of
        z for each observation.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X

        responsibilities: array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        check_is_fitted(self, 'gamma_')

        X = check_array(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        # expected log mixing proportions under the Dirichlet posterior
        dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))

        if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
            raise NotImplementedError("This ctype is not implemented: %s"
                                      % self.covariance_type)
        p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
                                 self.precs_, self.means_,
                                 self.covariance_type)

        z = p + dg
        z = log_normalize(z, axis=-1)
        bound = np.sum(z * p, axis=-1)
        return bound, z

    def _update_concentration(self, z):
        # Dirichlet posterior: prior pseudo-count plus responsibility mass
        for i in range(self.n_components):
            self.gamma_[i] = self.alpha + np.sum(z.T[i])

    def _initialize_gamma(self):
        self.gamma_ = self.alpha * np.ones(self.n_components)

    def _bound_proportions(self, z):
        logprior = 0.
        dg = digamma(self.gamma_)
        dg -= digamma(np.sum(self.gamma_))
        logprior += np.sum(dg.reshape((-1, 1)) * z.T)
        # entropy of z, skipping (numerically) zero responsibilities
        z_non_zeros = z[z > np.finfo(np.float32).eps]
        logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
        return logprior

    def _bound_concentration(self):
        logprior = 0.
        logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
                                                          * self.alpha)
        logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha))
        sg = digamma(np.sum(self.gamma_))
        logprior += np.sum((self.gamma_ - self.alpha)
                           * (digamma(self.gamma_) - sg))
        return logprior

    def _monitor(self, X, z, n, end=False):
        """Monitor the lower bound during iteration

        Debug method to help see exactly when it is failing to converge as
        expected.

        Note: this is very expensive and should not be used by default."""
        if self.verbose > 0:
            print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
            if end:
                print("Cluster proportions:", self.gamma_)
                print("covariance_type:", self.covariance_type)

    def _set_weights(self):
        # normalized Dirichlet posterior counts
        self.weights_[:] = self.gamma_
        self.weights_ /= np.sum(self.weights_)
|
toastedcornflakes/scikit-learn
|
sklearn/mixture/dpgmm.py
|
Python
|
bsd-3-clause
| 33,044
|
[
"Gaussian"
] |
3a973c65404e3090a0a535fe60645c0d6701008bb683c3b0d209094bf511e9b9
|
# Copyright iris-grib contributors
#
# This file is part of iris-grib and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Integration tests for grib2 file loading.
These tests load various files from iris-test-data, and compare the cube with a
reference CML file, to catch any unexpected changes over time.
"""
# import iris_grib.tests first so that some things can be initialised
# before importing anything else.
import iris_grib.tests as tests
import iris
_RESULTDIR_PREFIX = ("integration", "load_convert", "sample_file_loads")
@tests.skip_data
class TestBasicLoad(tests.IrisGribTest):
    """Load assorted GRIB sample files and compare against reference CML."""

    @staticmethod
    def _data_path(*parts):
        # Resolve a path inside the "GRIB" test-data directory.
        return tests.get_data_path(("GRIB",) + parts)

    @staticmethod
    def _cml(name):
        # Expected-result CML path inside this test's result directory.
        return _RESULTDIR_PREFIX + (name,)

    def test_load_rotated(self):
        cubes = iris.load(self._data_path("rotated_uk", "uk_wrongparam.grib1"))
        self.assertCML(cubes, self._cml("rotated.cml"))

    def test_load_time_bound(self):
        cubes = iris.load(self._data_path("time_processed", "time_bound.grib1"))
        self.assertCML(cubes, self._cml("time_bound_grib1.cml"))

    def test_load_time_processed(self):
        cubes = iris.load(self._data_path("time_processed", "time_bound.grib2"))
        self.assertCML(cubes, self._cml("time_bound_grib2.cml"))

    def test_load_3_layer(self):
        cubes = iris.load(self._data_path("3_layer_viz", "3_layer.grib2"))
        # Reorder into the layer order recorded in the reference CML.
        cubes = iris.cube.CubeList([cubes[1], cubes[0], cubes[2]])
        self.assertCML(cubes, self._cml("3_layer.cml"))

    def test_load_masked(self):
        cubes = iris.load(
            self._data_path("missing_values", "missing_values.grib2"))
        self.assertCML(cubes, self._cml("missing_values_grib2.cml"))

    def test_polar_stereo_grib1(self):
        cube = iris.load_cube(
            self._data_path("polar_stereo", "ST4.2013052210.01h"))
        self.assertCML(cube, self._cml("polar_stereo_grib1.cml"))

    def test_polar_stereo_grib2_grid_definition(self):
        cube = iris.load_cube(
            self._data_path(
                "polar_stereo",
                "CMC_glb_TMP_ISBL_1015_ps30km_2013052000_P006.grib2",
            )
        )
        self.assertEqual(cube.shape, (200, 247))
        x_coord = cube.coord("projection_x_coordinate")
        self.assertAlmostEqual(x_coord.points.max(), 4769905.5125, places=4)
        self.assertAlmostEqual(x_coord.points.min(), -2610094.4875, places=4)
        y_coord = cube.coord("projection_y_coordinate")
        self.assertAlmostEqual(y_coord.points.max(), -216.1459, places=4)
        self.assertAlmostEqual(y_coord.points.min(), -5970216.1459, places=4)
        # Both axes must share one stereographic coordinate system.
        crs = y_coord.coord_system
        self.assertEqual(crs, x_coord.coord_system)
        self.assertEqual(crs.grid_mapping_name, "stereographic")
        self.assertEqual(crs.central_lat, 90.0)
        self.assertEqual(crs.central_lon, 249.0)
        self.assertEqual(crs.false_easting, 0.0)
        self.assertEqual(crs.false_northing, 0.0)
        self.assertEqual(crs.true_scale_lat, 60.0)

    def test_lambert_grib1(self):
        cube = iris.load_cube(self._data_path("lambert", "lambert.grib1"))
        self.assertCML(cube, self._cml("lambert_grib1.cml"))

    def test_lambert_grib2(self):
        cube = iris.load_cube(self._data_path("lambert", "lambert.grib2"))
        self.assertCML(cube, self._cml("lambert_grib2.cml"))

    def test_regular_gg_grib1(self):
        cube = iris.load_cube(self._data_path("gaussian", "regular_gg.grib1"))
        self.assertCML(cube, self._cml("regular_gg_grib1.cml"))

    def test_regular_gg_grib2(self):
        cube = iris.load_cube(self._data_path("gaussian", "regular_gg.grib2"))
        self.assertCML(cube, self._cml("regular_gg_grib2.cml"))

    def test_reduced_ll(self):
        cube = iris.load_cube(self._data_path("reduced", "reduced_ll.grib1"))
        self.assertCML(cube, self._cml("reduced_ll_grib1.cml"))

    def test_reduced_gg(self):
        cube = iris.load_cube(self._data_path("reduced", "reduced_gg.grib2"))
        self.assertCML(cube, self._cml("reduced_gg_grib2.cml"))
@tests.skip_data
class TestIjDirections(tests.IrisGribTest):
    """Check loading of GRIB2 files with each i/j scanning-direction combo."""

    @staticmethod
    def _old_compat_load(name):
        # Load the single cube from an "ij_directions" sample file.
        path = tests.get_data_path(("GRIB", "ij_directions", name))
        return iris.load_cube(path)

    def _check(self, stem):
        # Compare the loaded cube against the reference CML of the same stem.
        cube = self._old_compat_load(stem + ".grib2")
        self.assertCML(cube, _RESULTDIR_PREFIX + (stem + ".cml",))

    def test_ij_directions_ipos_jpos(self):
        self._check("ipos_jpos")

    def test_ij_directions_ipos_jneg(self):
        self._check("ipos_jneg")

    def test_ij_directions_ineg_jneg(self):
        self._check("ineg_jneg")

    def test_ij_directions_ineg_jpos(self):
        self._check("ineg_jpos")
@tests.skip_data
class TestShapeOfEarth(tests.IrisGribTest):
    """Check loading of files using each GRIB 'shape of the earth' encoding."""

    @staticmethod
    def _old_compat_load(name):
        # Load the single cube from a "shape_of_earth" sample file.
        path = tests.get_data_path(("GRIB", "shape_of_earth", name))
        return iris.load_cube(path)

    def _check(self, filename, cml_name):
        cube = self._old_compat_load(filename)
        self.assertCML(cube, _RESULTDIR_PREFIX + (cml_name,))

    def test_shape_of_earth_basic(self):
        # pre-defined sphere
        self._check("0.grib2", "earth_shape_0.cml")

    def test_shape_of_earth_custom_1(self):
        # custom sphere
        self._check("1.grib2", "earth_shape_1.cml")

    def test_shape_of_earth_IAU65(self):
        # IAU65 oblate sphere
        self._check("2.grib2", "earth_shape_2.cml")

    def test_shape_of_earth_custom_3(self):
        # custom oblate spheroid (km)
        self._check("3.grib2", "earth_shape_3.cml")

    def test_shape_of_earth_IAG_GRS80(self):
        # IAG-GRS80 oblate spheroid
        self._check("4.grib2", "earth_shape_4.cml")

    def test_shape_of_earth_WGS84(self):
        # WGS84
        self._check("5.grib2", "earth_shape_5.cml")

    def test_shape_of_earth_pre_6(self):
        # pre-defined sphere
        self._check("6.grib2", "earth_shape_6.cml")

    def test_shape_of_earth_custom_7(self):
        # custom oblate spheroid (m)
        self._check("7.grib2", "earth_shape_7.cml")

    def test_shape_of_earth_grib1(self):
        # grib1 - same as grib2 shape 6, above
        self._check("global.grib1", "earth_shape_grib1.cml")
# Run this module's tests when executed directly.
if __name__ == "__main__":
    tests.main()
|
SciTools/iris-grib
|
iris_grib/tests/integration/load_convert/test_sample_file_loads.py
|
Python
|
lgpl-3.0
| 7,723
|
[
"Gaussian"
] |
c8f21e2f9c3eb877410d0c75693837922168a8a0629d5b82eb3686cb39df5793
|
###############################################################################
# Copyright 2017-2021 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "fstd2nc" package.
#
# "fstd2nc" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "fstd2nc" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "fstd2nc". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""
Functionality for converting between FSTD and netCDF files.
"""
__version__ = "0.20220204.2"
# Check for bundled rpnpy package.
# Fall back to this one if no standard rpnpy package available.
try:
  # Importing the module will set up the appropriate search paths.
  import fstd2nc_deps
  # Don't need a reference to the module after the paths are set.
  del fstd2nc_deps
except ImportError:
  # No bundled copy present; assume a system rpnpy is importable.
  pass
# Combine all the mixins to create a final interface for I/O.
from fstd2nc.mixins.select import SelectVars
from fstd2nc.mixins.ascii import ASCII
from fstd2nc.mixins.masks import Masks
from fstd2nc.mixins.dates import Dates
from fstd2nc.mixins.ensembles import Ensembles
from fstd2nc.mixins.series import Series
from fstd2nc.mixins.sfc_codes import Sfc_Codes
from fstd2nc.mixins.vardict import VarDict
from fstd2nc.mixins.vcoords import VCoords
from fstd2nc.mixins.xycoords import XYCoords
from fstd2nc.mixins.mesh import Mesh
from fstd2nc.mixins.misc import NoNK
from fstd2nc.mixins.filter import FilterRecords
from fstd2nc.mixins.removestuff import RemoveStuff
from fstd2nc.mixins.interp import Interp
from fstd2nc.mixins.pruneaxes import PruneAxes
from fstd2nc.mixins.netcdf import netCDF_Atts, netCDF_IO
from fstd2nc.mixins.compat import FSTD_Compat
from fstd2nc.mixins.extern import ExternInput, ExternOutput
class Buffer (ExternOutput,FSTD_Compat,netCDF_IO,netCDF_Atts,PruneAxes,Interp,RemoveStuff,FilterRecords,NoNK,Mesh,XYCoords,VCoords,Sfc_Codes,VarDict,Series,Ensembles,Dates,Masks,ASCII,SelectVars,ExternInput):
  """
  High-level interface for FSTD data, to treat it as multi-dimensional arrays.
  Contains logic for dealing with most of the common FSTD file conventions.
  """
  def __init__ (self, filename, *args, **kwargs):
    # All real option handling happens in the mixins' cooperative
    # __init__ chain; this class only combines them.
    super(Buffer,self).__init__(filename, *args,**kwargs)
# Dynamically generate final init docstring from the mixins.
def _docstring ():
  """Assemble the final Buffer.__init__ docstring from all mixin docstrings."""
  from fstd2nc.mixins import BufferBase
  base_doc = BufferBase.__init__.__doc__
  pieces = [base_doc.rstrip().strip('\n')]
  # Walk the bases in reverse declaration order so each mixin's options are
  # appended in the order the mixins are applied.
  for base in reversed(Buffer.__bases__):
    doc = base.__init__.__doc__
    if doc is not None and doc != base_doc:
      pieces.append(doc.rstrip().strip('\n'))
  return '\n'.join(pieces)
# Attach the combined docstring to Buffer.__init__.
try:
  # Python 2: must go through __func__ to set the underlying function's doc.
  Buffer.__init__.__func__.__doc__ = _docstring()
except AttributeError:
  # Python 3: plain functions expose __doc__ directly.
  Buffer.__init__.__doc__ = _docstring()
|
neishm/fstd2nc
|
fstd2nc/__init__.py
|
Python
|
lgpl-3.0
| 3,321
|
[
"NetCDF"
] |
e57886c9b8cf922362bfc806012ce236eddac3479ddbb9010fbd0418d7172445
|
#
# Calc_ElasticModuli_from_VTK.py
#
# Written by Matthew Priddy on March 17, 2015
# Some functions contributed by Noah Paulson
# Input data also contributed by Noah Paulson
#
# Contact information:
# Matthew W. Priddy: mwpriddy (at) gatech (dot) edu
# mwpriddy (at) gmail (dot) com
#
# The purpose of this code is to determine directional elastic moduli from
# stress/strain data calculated with FEM/MKS/etc.
#
# The stress/strain data comes from uniaxial strain boundary conditions, but elastic moduli
# values are typically determined from unixial stress boundary conditions.
#
# Specifically, this code will:
# (1) volume average stress and strain components for loading in x-, y-, and z-directions
# (2) determine stiffness matrix (C_ij) components
# (3) invert stiffness matrix to determine compliance matrix (S_ij)
# (4) calculate elastic moduli for x-, y-, and z-direction
#
# Notes: (a) you must use decimals to return decimals
# (b) sin, cos, etc. use Radians
#
from sys import *
from string import *
from math import *
from numpy import *
import vtk
# Read in (or, in this case, assign) the file prefix and material names
#fileName = sys.argv[1]
#material = sys.argv[2]
fileName = "mks_alphaTi"  # prefix of the input VTK file names (see loop below)
material = "random"       # microstructure label used in the file names
# Some VTK functions that might be used in this script
def VTK_Header(fileName, file_input, nx_pt, ny_pt, nz_pt, X, Y, Z, no_el):
    """Write a legacy (v2.0) rectilinear-grid VTK header to an open file.

    fileName:   open, writable file object (despite the name)
    file_input: source-file name recorded in the title line
    nx_pt, ny_pt, nz_pt: number of grid points along x/y/z
    X, Y, Z:    coordinate sequences for each axis
    no_el:      number of cells, written to the CELL_DATA line
    """
    # BUG FIX: 'time' was never imported at module level, so time.strftime
    # below raised NameError; import it locally here.
    import time
    fileName.write("# vtk DataFile Version 2.0" "\n")
    fileName.write("data file: " + file_input + " generated by Matthew W. Priddy on " + str(time.strftime("%c")) + "\n")
    fileName.write("ASCII" + "\n")
    fileName.write("DATASET RECTILINEAR_GRID" + "\n")
    fileName.write("DIMENSIONS " + str(nx_pt) + " " + str(ny_pt) + " " + str(nz_pt) + "\n")
    fileName.write("X_COORDINATES " + str(nx_pt) + " float" "\n")
    for i in range(len(X)):
        fileName.write("% 2.4f " % (X[i]))
        # NOTE(review): this condition can never be true (range stops at
        # len(X)-1); the unconditional newline after the loop is what
        # actually terminates the row.  Kept to preserve output exactly.
        if i == len(X):
            fileName.write("\n")
    fileName.write("\n")
    fileName.write("Y_COORDINATES " + str(ny_pt) + " float" "\n")
    for i in range(len(Y)):
        fileName.write("% 2.4f " % (Y[i]))
        if i == len(Y):
            fileName.write("\n")
    fileName.write("\n")
    fileName.write("Z_COORDINATES " + str(nz_pt) + " float" "\n")
    for i in range(len(Z)):
        fileName.write("% 2.4f " % (Z[i]))
        if i == len(Z):
            fileName.write("\n")
    fileName.write("\n")
    fileName.write("CELL_DATA " + str(no_el) + "\n")
def VTK_Scalar(fileName, dataName, data, no_per_line):
    """Write a float SCALARS section, at most no_per_line values per row."""
    fileName.write("SCALARS " + dataName + " float " + str(1) + "\n")
    fileName.write("LOOKUP_TABLE default" "\n")
    total = len(data)
    for count, value in enumerate(data, 1):
        fileName.write("% 2.6E " % (value,))
        # Break the row when full, or terminate after the final value.
        if count % no_per_line == 0:
            fileName.write("\n")
        elif count == total:
            fileName.write("\n")
def VTK_Scalar_Int(fileName, dataName, data, no_per_line):
    """Write an int SCALARS section, at most no_per_line values per row."""
    fileName.write("SCALARS " + dataName + " int " + str(1) + "\n")
    fileName.write("LOOKUP_TABLE default" "\n")
    total = len(data)
    for count, value in enumerate(data, 1):
        fileName.write("% 5d " % (value,))
        # Break the row when full, or terminate after the final value.
        if count % no_per_line == 0:
            fileName.write("\n")
        elif count == total:
            fileName.write("\n")
def VTK_Vector(fileName, dataName, data, no_per_line):
    """Write a VECTORS section; data is indexed data[component, element]."""
    fileName.write("VECTORS " + dataName + " float " + "\n")
    n_items = len(data[0, :])
    for count in range(1, n_items + 1):
        idx = count - 1
        fileName.write(" % +2.6E % +2.6E % +2.6E " % (data[0, idx], data[1, idx], data[2, idx]))
        # Break the row when full, or terminate after the final vector.
        if count % no_per_line == 0:
            fileName.write("\n")
        elif count == n_items:
            fileName.write("\n")
def VTK_Tensor(fileName, dataName, data_00, data_01, data_02, data_11, data_12, data_22, no_per_line):
    """Write a TENSORS section from the six independent components of a
    symmetric tensor; the row-major 3x3 layout 00,01,02 / 01,11,12 /
    02,12,22 is reconstructed, one element per output line."""
    fileName.write("TENSORS " + dataName + " float " + "\n")
    row_fmt = " % +2.6E % +2.6E % +2.6E % +2.6E % +2.6E % +2.6E % +2.6E % +2.6E % +2.6E "
    for k in range(len(data_00)):
        values = (data_00[k], data_01[k], data_02[k],
                  data_01[k], data_11[k], data_12[k],
                  data_02[k], data_12[k], data_22[k])
        fileName.write(row_fmt % values + "\n")
def read_vtk_tensor(filename, tensor_id, comp):
    """
    Summary:
        Read one component of a cell-data tensor field from a legacy VTK file.
        Much of this code was taken from Matthew Priddy's example file.
    Inputs:
        filename:  path to the VTK file (Dream3D-style rectilinear grid)
        tensor_id: 0 = stress, 1 = strain, 2 = plastic strain tensor array
        comp:      flat index 0-8 into the row-major 3x3 tensor per element
    Outputs:
        1-D numpy array holding the requested component for every element
    """
    # Initialize the reading of the VTK microstructure created by Dream3D
    reader = vtk.vtkDataSetReader()
    reader.SetFileName(filename)
    reader.ReadAllTensorsOn()
    reader.ReadAllVectorsOn()
    reader.ReadAllScalarsOn()
    reader.Update()
    data = reader.GetOutput()
    dim = data.GetDimensions()
    vec = list(dim)
    # Cell counts are point counts minus one along each axis.
    vec = [i-1 for i in dim]
    el = vec[0]
    # Calculate the total number of elements
    # (assumes a cubic grid: only the first dimension is used)
    el_total = el**3
    if tensor_id == 0:
        # if meas == 0, we read the stress tensor
        meas = data.GetCellData().GetArray(reader.GetTensorsNameInFile(0))
    elif tensor_id == 1:
        # if meas == 1, we read the strain tensor
        meas = data.GetCellData().GetArray(reader.GetTensorsNameInFile(1))
    elif tensor_id == 2:
        # if meas == 2, we read the plastic strain tensor
        meas = data.GetCellData().GetArray(reader.GetTensorsNameInFile(2))
    meas_py = zeros([el_total])
    # Each element stores 9 flattened tensor values; pick component `comp`.
    # NOTE(review): xrange is Python 2 only.
    for ii in xrange(el_total):
        meas_py[ii] = meas.GetValue(ii*9 + comp)
    return meas_py
def read_vtk_vector(filename):
    """
    Summary:
        Read the cell-data Euler-angle vector field (3 values per element)
        from a legacy VTK file.
        Much of this code was taken from Matthew Priddy's example file.
    Inputs:
        filename: path to the VTK file (Dream3D-style rectilinear grid)
    Outputs:
        numpy array of shape (3, n_elements) with the Euler angles
    """
    # Initialize the reading of the VTK microstructure created by Dream3D
    reader = vtk.vtkDataSetReader()
    reader.SetFileName(filename)
    reader.ReadAllTensorsOn()
    reader.ReadAllVectorsOn()
    reader.ReadAllScalarsOn()
    reader.Update()
    data = reader.GetOutput()
    dim = data.GetDimensions()
    vec = list(dim)
    # Cell counts are point counts minus one along each axis.
    vec = [i-1 for i in dim]
    el = vec[0]
    # Calculate the total number of elements
    # (assumes a cubic grid: only the first dimension is used)
    el_total = el**3
    Euler = data.GetCellData().GetArray(reader.GetVectorsNameInFile(0))
    euler_py = zeros([3, el_total])
    # NOTE(review): xrange is Python 2 only.
    for ii in xrange(el_total):
        euler_py[0, ii] = Euler.GetValue(ii*3 + 0)
        euler_py[1, ii] = Euler.GetValue(ii*3 + 1)
        euler_py[2, ii] = Euler.GetValue(ii*3 + 2)
    return euler_py
def read_vtk_scalar(filename):
    """
    Summary:
        Read the first cell-data scalar field from a legacy VTK file.
        Much of this code was taken from Matthew Priddy's example file.
    Inputs:
        filename: path to the VTK file (Dream3D-style rectilinear grid)
    Outputs:
        1-D numpy array with the scalar value for every element
    """
    # Initialize the reading of the VTK microstructure created by Dream3D
    reader = vtk.vtkDataSetReader()
    reader.SetFileName(filename)
    reader.ReadAllTensorsOn()
    reader.ReadAllVectorsOn()
    reader.ReadAllScalarsOn()
    reader.Update()
    data = reader.GetOutput()
    dim = data.GetDimensions()
    vec = list(dim)
    # Cell counts are point counts minus one along each axis.
    vec = [i-1 for i in dim]
    el = vec[0]
    # Calculate the total number of elements
    # (assumes a cubic grid: only the first dimension is used)
    el_total = el**3
    Scalar = data.GetCellData().GetArray(reader.GetScalarsNameInFile(0))
    scalar_py = zeros([el_total])
    # NOTE(review): xrange is Python 2 only.
    for ii in xrange(el_total):
        scalar_py[ii] = Scalar.GetValue(ii)
    return scalar_py
###### Start of actual Code ######
# Initialize the average stress and strain tensors
# (3x3 arrays; only the diagonal entries are filled in the loop below).
stress_avg_xdir = zeros((3,3))
stress_avg_ydir = zeros((3,3))
stress_avg_zdir = zeros((3,3))
strain_avg_xdir = zeros((3,3))
strain_avg_ydir = zeros((3,3))
strain_avg_zdir = zeros((3,3))
# Iterate over the (a) number of simulations, (b) number of cycles, and (c) three loading directions
for num_simulations in range(0,1):
print "Simulation: " + str(num_simulations + 1)
for cycles in range(0,1):
cycles = cycles + 1
print " Cycle: " + str(cycles)
for num_directions in range(0,3):
print " Direction: " + str(num_directions + 1)
if num_directions == 0:
f1_all = fileName + '_Xdir_IDval_' + material + '_sn' + str(num_simulations) + '_step' + str(2*cycles - 1) + '.vtk'
elif num_directions == 1:
f1_all = fileName + '_Ydir_IDval_' + material + '_sn' + str(num_simulations) + '_step' + str(2*cycles - 1) + '.vtk'
elif num_directions == 2:
f1_all = fileName + '_Zdir_IDval_' + material + '_sn' + str(num_simulations) + '_step' + str(2*cycles - 1) + '.vtk'
# Initialize the reading of the VTK file
reader = vtk.vtkDataSetReader()
reader.SetFileName(f1_all)
reader.ReadAllTensorsOn()
reader.ReadAllVectorsOn()
reader.ReadAllScalarsOn()
reader.Update()
data = reader.GetOutput()
dim = data.GetDimensions()
vec = list(dim)
vec = [i-1 for i in dim]
elements = vec[0]*vec[0]*vec[0]
# Preallocate stress and strain components
elemID_max = [0.0 for i in range(elements)]
strs_t00_max = [0.0 for i in range(elements)]
strs_t11_max = [0.0 for i in range(elements)]
strs_t22_max = [0.0 for i in range(elements)]
strs_t01_max = [0.0 for i in range(elements)]
strs_t02_max = [0.0 for i in range(elements)]
strs_t12_max = [0.0 for i in range(elements)]
strn_t00_max = [0.0 for i in range(elements)]
strn_t11_max = [0.0 for i in range(elements)]
strn_t22_max = [0.0 for i in range(elements)]
strn_t01_max = [0.0 for i in range(elements)]
strn_t02_max = [0.0 for i in range(elements)]
strn_t12_max = [0.0 for i in range(elements)]
# (1) Extract stress and strain for all elements in VTK file
# f1 is the initial max loading (tyically in tension)
strs_t00_max = read_vtk_tensor(f1_all, 0, 0)
strs_t11_max = read_vtk_tensor(f1_all, 0, 4)
strs_t22_max = read_vtk_tensor(f1_all, 0, 8)
strs_t01_max = read_vtk_tensor(f1_all, 0, 1)
strs_t02_max = read_vtk_tensor(f1_all, 0, 2)
strs_t12_max = read_vtk_tensor(f1_all, 0, 5)
strn_t00_max = read_vtk_tensor(f1_all, 1, 0)
strn_t11_max = read_vtk_tensor(f1_all, 1, 4)
strn_t22_max = read_vtk_tensor(f1_all, 1, 8)
strn_t01_max = read_vtk_tensor(f1_all, 1, 1)
strn_t02_max = read_vtk_tensor(f1_all, 1, 2)
strn_t12_max = read_vtk_tensor(f1_all, 1, 3)
# Average the normal stress and strain values in order to determine the elastic moduli for each loading direction
if num_directions == 0:
stress_avg_xdir[0,0] = sum(strs_t00_max) / len(strs_t00_max)
stress_avg_xdir[1,1] = sum(strs_t11_max) / len(strs_t11_max)
stress_avg_xdir[2,2] = sum(strs_t22_max) / len(strs_t22_max)
strain_avg_xdir[0,0] = sum(strn_t00_max) / len(strn_t00_max)
strain_avg_xdir[1,1] = sum(strn_t11_max) / len(strn_t11_max)
strain_avg_xdir[2,2] = sum(strn_t22_max) / len(strn_t22_max)
# stress_avg_xdir[0,1] = sum(strs_t01_max) / len(strs_t01_max)
# stress_avg_xdir[0,2] = sum(strs_t02_max) / len(strs_t02_max)
# stress_avg_xdir[1,0] = sum(strs_t01_max) / len(strs_t01_max)
# stress_avg_xdir[1,2] = sum(strs_t12_max) / len(strs_t12_max)
# stress_avg_xdir[2,0] = sum(strs_t02_max) / len(strs_t02_max)
# stress_avg_xdir[2,1] = sum(strs_t12_max) / len(strs_t12_max)
# strain_avg_xdir[0,1] = sum(strn_t01_max) / len(strn_t01_max)
# strain_avg_xdir[0,2] = sum(strn_t02_max) / len(strn_t02_max)
# strain_avg_xdir[1,0] = sum(strn_t01_max) / len(strn_t01_max)
# strain_avg_xdir[1,2] = sum(strn_t12_max) / len(strn_t12_max)
# strain_avg_xdir[2,0] = sum(strn_t02_max) / len(strn_t02_max)
# strain_avg_xdir[2,1] = sum(strn_t12_max) / len(strn_t12_max)
elif num_directions == 1:
stress_avg_ydir[0,0] = sum(strs_t00_max) / len(strs_t00_max)
stress_avg_ydir[1,1] = sum(strs_t11_max) / len(strs_t11_max)
stress_avg_ydir[2,2] = sum(strs_t22_max) / len(strs_t22_max)
strain_avg_ydir[0,0] = sum(strn_t00_max) / len(strn_t00_max)
strain_avg_ydir[1,1] = sum(strn_t11_max) / len(strn_t11_max)
strain_avg_ydir[2,2] = sum(strn_t22_max) / len(strn_t22_max)
elif num_directions == 2:
stress_avg_zdir[0,0] = sum(strs_t00_max) / len(strs_t00_max)
stress_avg_zdir[1,1] = sum(strs_t11_max) / len(strs_t11_max)
stress_avg_zdir[2,2] = sum(strs_t22_max) / len(strs_t22_max)
strain_avg_zdir[0,0] = sum(strn_t00_max) / len(strn_t00_max)
strain_avg_zdir[1,1] = sum(strn_t11_max) / len(strn_t11_max)
strain_avg_zdir[2,2] = sum(strn_t22_max) / len(strn_t22_max)
# Determine elastic moduli from averaged data
C_matrix = array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
S_matrix = array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
# Define C_ij
# Each uniaxial-strain load case fills one row: C[i][j] = sigma_jj / eps_ii
# for loading along direction i (normal components only).
C_matrix[0,0] = stress_avg_xdir[0,0] / strain_avg_xdir[0,0]
C_matrix[0,1] = stress_avg_xdir[1,1] / strain_avg_xdir[0,0]
C_matrix[0,2] = stress_avg_xdir[2,2] / strain_avg_xdir[0,0]
C_matrix[1,0] = stress_avg_ydir[0,0] / strain_avg_ydir[1,1]
C_matrix[1,1] = stress_avg_ydir[1,1] / strain_avg_ydir[1,1]
C_matrix[1,2] = stress_avg_ydir[2,2] / strain_avg_ydir[1,1]
C_matrix[2,0] = stress_avg_zdir[0,0] / strain_avg_zdir[2,2]
C_matrix[2,1] = stress_avg_zdir[1,1] / strain_avg_zdir[2,2]
C_matrix[2,2] = stress_avg_zdir[2,2] / strain_avg_zdir[2,2]
# print C_matrix
# Invert C_ij to determine S_ij
S_matrix = linalg.inv(C_matrix)
# print each of the elastic moduli
# E_ii = 1 / S_ii gives the directional Young's modulus.
print " X-direction: E_11 = " + str(1.0/S_matrix[0,0])
print " Y-direction: E_22 = " + str(1.0/S_matrix[1,1])
print " Z-direction: E_33 = " + str(1.0/S_matrix[2,2])
|
mwpriddy/python_scripts_research
|
Calc_ElasticModuli_from_VTK/Calc_ElasticModuli_from_VTK.py
|
Python
|
mit
| 13,800
|
[
"VTK"
] |
f54128d646f9deb370306032e52f50ed6cdbef372745494c2bde11e092f0f808
|
"""
this file does variant calling for RNAseq
"""
#============= import required packages =================
import os
import sys
import subprocess
# Reopen stdout unbuffered so pipeline progress appears immediately.
# NOTE(review): buffering=0 with text mode 'w' is Python 2 only; this raises
# ValueError on Python 3 -- confirm the intended interpreter.
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # disable buffer
from f00_Message import Message
from f01_list_trim_fq import list_files_human,Trimmomatic
from f02_aligner_command import STAR2Pass
from f03_samtools import sam2bam_sort
from f07_picard import markduplicates,addReadGroup
from f08_GATK import *
from p01_FileProcess import remove,get_parameters,rg_bams
#============= define some parameters ===================
"""these parameters and read group names are different for
different samples, should only change this part for
running pipeline
"""
# First CLI argument is the parameter file describing this run.
parFile = sys.argv[1]
param = get_parameters(parFile)
# Run control.
thread = param['thread']
email = param['email']
startMessage = param['startMessage']
endMessage = param['endMessage']
# Reference data, input location and aligner settings.
ref_fa = param['refSequence']
file_path = param['filePath']
starDb = param['alignerDb']
trim = param['trim']
phred = param['phred']
# Tool paths.
picard = param['picard']
trimmomatic = param['trimmomatic']
trimmoAdapter = param['trimmoAdapter']
# Known-variant resources for recalibration/realignment.
gold_snp = param['dbSNP']
phaseINDEL= param['phase1INDEL']
gold_indel= param['MillINDEL']
omni = param['omni']
hapmap = param['hapMap']
gatk = param['gatk']
read_group = param['readGroup']
organism = param['organism']
##***************** Part 0. Build index file for bwa and GATK ******
##***************** Part I. Preprocess ============================
#======== 1. map and dedupping =====================================
#======== (0) enter the directory ========================
os.chdir(file_path)
Message(startMessage,email)
#======== (1) read files ================================
fastqFiles = list_files_human(file_path)
# Optional adapter/quality trimming before alignment.
if trim == 'True':
    fastqFiles = Trimmomatic(trimmomatic,fastqFiles,phred,trimmoAdapter)
sys.stdout.write('list file succeed\n')
sys.stdout.write('fastqFiles is: {fq}\n'.format(fq=fastqFiles))
#======== (2) align using 2 pass STAR ====================
try:
    map_sams= STAR2Pass(fastqFiles,starDb,ref_fa,thread)
    sys.stdout.write('align succeed\n')
    sys.stdout.write('map_sams is: {map}\n'.format(map=map_sams))
except:
    # NOTE(review): bare except hides the real error; consider logging it.
    sys.stdout.write('align failed\n')
    Message('align failed',email)
    sys.exit(1)
#======== 2. Add read groups, sort,mark duplicates, and create index
#======== (1) sort and add group =========================
try:
    sort_bams = sam2bam_sort(map_sams,thread)
    sys.stdout.write('sort bam succeed\n')
    sys.stdout.write('sort_bams is: {bam}\n'.format(bam=sort_bams))
except:
    sys.stdout.write('sort bam failed\n')
    Message('sort bam failed',email)
    sys.exit(1)
try:
    group_bams = addReadGroup(picard,sort_bams,read_group)
    sys.stdout.write('add group succeed\n')
    sys.stdout.write('group_bams is: {group}\n'.format(group=group_bams))
except:
    sys.stdout.write('add group failed\n')
    Message('add group failed',email)
    sys.exit(1)
#======== (2) mark duplicates ============================
try:
    dedup_bams = markduplicates(picard,group_bams)
    sys.stdout.write('mark duplicate succeed\n')
    sys.stdout.write('dedup_bams is: {dedup}\n'.format(dedup=dedup_bams))
    # Intermediate BAMs are deleted once the next stage has succeeded.
    remove(group_bams)
except:
    sys.stdout.write('mark duplicate failed\n')
    Message('mark duplicate failed',email)
    sys.exit(1)
#======== 3. Split 'N' Trim and reassign mapping qualiteies
# RNA-seq specific step: split reads spanning splice junctions.
try:
    split_bams = splitN(gatk,dedup_bams,ref_fa)
    sys.stdout.write('split N succeed\n')
    sys.stdout.write('split N is: {N}\n'.format(N=split_bams))
    remove(dedup_bams)
except:
    sys.stdout.write('split N failed\n')
    Message('split N failed',email)
    sys.exit(1)
#======== 4. Indel realignment ===========================
#======== (1) generate intervals =========================
try:
    interval = RealignerTargetCreator(gatk,split_bams,ref_fa,thread,phaseINDEL,gold_indel)
    sys.stdout.write('RealignerTarget Creator succeed\n')
    sys.stdout.write('interval is: {int}\n'.format(int=interval))
except:
    sys.stdout.write('RealignerTarget Creator failed\n')
    Message('RealignerTarget Creator failed',email)
    sys.exit(1)
#======== (2) realignment of target intervals ============
try:
    realign_bams = IndelRealigner(gatk,split_bams,ref_fa,interval,phaseINDEL,gold_indel)
    sys.stdout.write('IndelRealigner succeed\n')
    sys.stdout.write('realign bams is: {reali}\n'.format(reali=realign_bams))
    remove(split_bams)
except:
    sys.stdout.write('IndelRealigner failed\n')
    Message('IndelRealigner failed',email)
    sys.exit(1)
#======== 5. Base quality recalibration =================
roundNum = 1
try:
    recal_bams = BaseRecalibrator(gatk,realign_bams,ref_fa,gold_snp,
                                  gold_indel,roundNum,thread)
    sys.stdout.write('recalibration succeed\n')
    sys.stdout.write('recal_bams is: {recal}\n'.format(recal=recal_bams))
except:
    sys.stdout.write('recalibration failed\n')
    Message('recalibration failed',email)
    sys.exit(1)
#======== !!! merge lanes for the same sample ============
# NOTE(review): roundNum is reassigned to the string '1' here but is never
# read again below; BaseRecalibrator above used the int value.
roundNum = '1'
# Multiple recalibrated BAMs means one sample was sequenced on several
# lanes: merge, re-deduplicate and re-realign the combined BAM.
if len(recal_bams) !=1:
    try:
        merged_bams = rg_bams(read_group,recal_bams)
        sys.stdout.write('merge_bams is: {mer}\n'.format(mer=merged_bams))
        remove(recal_bams)
    except:
        sys.stdout.write('merge failed\n')
        Message('merge failed',email)
        sys.exit(1)
    try:
        dedup_files = markduplicates(picard,merged_bams)
        sys.stdout.write('dedup_files is: {dedup}\n'.format(dedup=dedup_files))
        remove(merged_bams)
    except:
        sys.stdout.write('mark duplicate merged failed\n')
        # Fixed typo: message previously read 'mark uplicate merged failed'.
        Message('mark duplicate merged failed',email)
        sys.exit(1)
    try:
        interval = RealignerTargetCreator(gatk,dedup_files,ref_fa,thread,
                                          phaseINDEL,gold_indel)
        realign_bams = IndelRealigner(gatk,dedup_files,ref_fa,interval,
                                      phaseINDEL,gold_indel)
        remove(dedup_files)
        sys.stdout.write('realign_bams is: {reali}\n'.format(reali=realign_bams))
        sys.stdout.write('merge lanes succeed\n')
    except:
        sys.stdout.write('realign merged failed\n')
        Message('realign merged failed',email)
        sys.exit(1)
else:
    # Single lane: use the recalibrated BAM directly.
    realign_bams = recal_bams
#======== 6. Variant Calling =============================
try:
    vcf_files = HaplotypeCaller_RNA_VCF(gatk,realign_bams,ref_fa,thread)
    sys.stdout.write('2 round call succeed\n')
    sys.stdout.write('vcf_files is: {vcf}\n'.format(vcf=vcf_files))
    remove(realign_bams)
except:
    sys.stdout.write('2 round call failed\n')
    Message('2 round call failed',email)
    sys.exit(1)
#======== 7. Variant filtering ===========================
try:
    gold_varis = RNA_Vari_Filter(gatk,vcf_files,ref_fa)
    sys.stdout.write('variant filter succeed\n')
    sys.stdout.write('gold_varis is: {gold}\n'.format(gold=gold_varis))
    # Fixed typo: previously 'succeessfully'.
    sys.stdout.write('job finished successfully\n')
except:
    # Fixed typo and added missing newline: previously 'vairant filter failed'.
    sys.stdout.write('variant filter failed\n')
    Message('variant filter failed',email)
    sys.exit(1)
Message(endMessage,email)
|
shl198/Projects
|
Human_GATK_vari_call/Human_GATK_RNA_vari_call.py
|
Python
|
mit
| 7,162
|
[
"BWA"
] |
f84985e31e24e84fb40f7a8a164bc7ab0f03dbce8c47b1f3b62cffe1a72187c9
|
""" This function uses a connected telescope object to take a sequence of images for lightcurve studies.
"""
from astropy.io import fits
from config import config
import datetime
import time
import os
import json
import urllib.request
import re
from astroplan import Observer
import astropy.units as u
from astropy.time import Time
from . import ch
import numpy as np
from astropy.coordinates import SkyCoord, EarthLocation, AltAz, get_sun, Angle
# lazy global logger linked to the telescope.log in get_lightcurve
# NOTE(review): module code appears to assume this is set before use; confirm
# that get_lightcurve always runs first.
logger = None
#
# the observatory
#
class Observatory():
    """A ground-based observing site.

    Attributes:
        code: observatory code/name
        latitude, longitude: site coordinates in decimal degrees
        altitude: site elevation in meters
        timezone: site timezone name
    """
    code = None
    latitude = 0.0  # in decimal degrees
    longitude = 0.0  # in decimal degrees
    altitude = 0.0  # in meters
    # BUG FIX: this class attribute was misspelled 'timzeone'.  __init__
    # always sets self.timezone, so only the class-level default changes.
    timezone = None

    # init
    def __init__(self, code, latitude, longitude, altitude, timezone):
        self.code = code
        self.latitude = latitude
        self.longitude = longitude
        self.altitude = altitude
        self.timezone = timezone

    def toString(self):
        """Return a one-line human-readable summary of the site."""
        observatory_string = 'observatory: code=%s, lat=%f, lon=%f, alt=%f' % (
            self.code, self.latitude, self.longitude, self.altitude)
        return observatory_string
#
# an astronomical observation
#
class Observation():
    """A single astronomical observation request: a target, the image
    sequence to take, the site, and scheduling bookkeeping."""
    sequence = None  # image sequence to acquire
    target = None  # target to observe
    observatory = None  # observing site
    observer = None  # who's observing?
    min_obs_alt = None  # min alt to start observations in deg
    # used by the Scheduler and others
    obs_start_time = None  # when will target best be observable?
    min_obs_time = None  # when is the target first observable?
    max_obs_time = None  # when is the target last observable?
    max_alt_time = None  # when does the target reach maximum altitude?
    active = True  # is this observation (still) active?
    id = -1  # id
    # init
    def __init__(self, observatory, target, sequence, min_obs_alt, observer):
        """Store the observation definition.

        Does not compute visibility times; callers invoke getTimes() when
        they need min/max observable times.
        """
        self.observatory = observatory
        self.target = target
        self.sequence = sequence
        self.min_obs_alt = min_obs_alt
        self.observer = observer
        # self.getTimes()  # deliberately deferred; see docstring
    def toString(self):
        """Return a multi-line debug dump of this observation's state,
        delegating to the observatory/target/sequence toString methods."""
        return '%s\n%s\n%smin_alt=%f deg\nobs_time=%s\nid=%d\nactive=%d\nmin_obs_time=%s\nmax_obs_time=%s\nmax_alt_time=%s\nuser=%s' % (self.observatory.toString(), self.target.toString(), self.sequence.toString(), self.min_obs_alt, self.obs_start_time, self.id, self.active, self.min_obs_time, self.max_obs_time, self.max_alt_time, self.observer)
# for this observation, get min/max observable times and max alt time
def getTimes(self):
# temp var to hold obs info
obs = {'time': [], 'id': []}
# init observatory location
observatory_location = EarthLocation(
lat=self.observatory.latitude*u.deg, lon=self.observatory.longitude*u.deg, height=self.observatory.altitude*u.m)
# get next sunrise and nearest sunset times
observatory_location_obsplan = Observer(longitude=self.observatory.longitude*u.deg, latitude=self.observatory.latitude *
u.deg, elevation=self.observatory.altitude*u.m, name=self.observatory.code, timezone=self.observatory.timezone)
sunset_time = observatory_location_obsplan.twilight_evening_nautical(
Time.now(), which="nearest")
sunrise_time = observatory_location_obsplan.twilight_morning_nautical(
Time.now(), which="next")
logger.debug('The nearest sunset is %s. The next sunrise is %s.' %
(sunset_time.iso, sunrise_time.iso))
# build alt-az coordinate frame for observatory over next ? hours (e.g., nearest sunset to next sunrise)
# start time is sunset or current time, if later...
now = Time.now()
if (now > sunset_time):
obs_time = Time.now()
else:
obs_time = sunset_time
delta_obs_time = np.linspace(
0, (sunrise_time-obs_time).sec/3600., 1000)*u.hour
# array of times between sunset and sunrise
times = obs_time + delta_obs_time
# celestial frame for this observatory over times
frame = AltAz(obstime=times, location=observatory_location)
# build target altaz relative to observatory
target_ra = self.target.getRa()
target_dec = self.target.getDec()
input_coordinates = target_ra + " " + target_dec
try:
target_coordinates = SkyCoord(
input_coordinates, unit=(u.hourangle, u.deg))
except:
pass
target_altaz = target_coordinates.transform_to(frame)
# when is target highest *and* above minimum altitude?
# when is it above min_obs_alt?
valid_alt_times = times[np.where(
target_altaz.alt >= self.min_obs_alt*u.degree)]
# when does the max alt occur?
if len(valid_alt_times) > 0:
self.min_obs_time = Time(
np.min(times[np.where(target_altaz.alt > self.min_obs_alt*u.degree)]))
self.max_obs_time = Time(
np.max(times[np.where(target_altaz.alt > self.min_obs_alt*u.degree)]))
self.max_alt_time = Time(
times[np.argmax(target_altaz.alt)])
else:
logger.error('Target (%s) is not observable.' %
self.target.getName())
#
# a target
#
class Target():
    """An observing target: a name plus sexagesimal RA (hour:min:sec) and
    Dec (deg:min:sec) strings. Class also provides lookups that resolve a
    keyword into targets via SIMBAD (celestial) or JPL HORIZONS (solar
    system)."""
    # init
    def __init__(self, name, ra, dec):
        self.name = name
        self.ra = ra  # hour:min:sec
        self.dec = dec  # deg:min:sec
    # init with name and type only
    @classmethod
    def from_name(cls, keyword, observatory, type):
        """Alternate constructor: resolve *keyword* (of the given *type*)
        into a concrete Target. Exits the process when nothing matches;
        uses the first match (with a warning) when several do."""
        objects = Target.findObjects(keyword, observatory, type)
        if len(objects) == 0:
            logger.error('Could not find matching object for %s.' % keyword)
            sys.exit(1)
        else:
            if len(objects) > 1:
                # BUGFIX: was 'name' (undefined in this scope, raising
                # NameError on this path) -- use the search keyword
                logger.warn('Found multiple matching objects for %s. Using first object (%s).' % (
                    keyword, objects[0]['name']))
            target = cls(objects[0]['name'], objects[0]['ra'], objects[0]['dec'])
            return target
    # name
    def getName(self):
        return self.name
    def setName(self, name):
        self.name = name
    # ra = right ascension
    def getRa(self):
        return self.ra
    def setRa(self, ra):
        self.ra = ra
    # dec = declination
    def getDec(self):
        return self.dec
    def setDec(self, dec):
        self.dec = dec
    def toString(self):
        """Return a one-line, human-readable summary of this target."""
        return 'target: name=%s, ra=%s, dec=%s' % (self.name, self.ra, self.dec)
    @staticmethod
    def findObjects(keyword, observatory, type):
        """Dispatch a keyword search to the solar-system or celestial
        resolver based on *type*; returns a list of match dicts (empty on
        unknown type)."""
        type = type.lower()
        if (type == 'asteroid' or type == 'planet' or type == 'solar system'):
            return Target.findSolarSystemObjects(keyword, observatory)
        elif (type == 'star' or type == 'celestial' or type == 'galaxy'):
            return Target.findCelestialObjects(keyword)
        else:
            logger.error("Unknown type (%s) in Target.findObjects." % type)
            return []
    @staticmethod
    def findCelestialObjects(keyword):
        """Query SIMBAD for *keyword*; return a list of match dicts with
        type/id/name/ra/dec, or an empty list when nothing matches."""
        results = Simbad.query_object(keyword)
        # BUGFIX: identity check -- '== None' is non-idiomatic and can
        # misbehave on table-like results that overload equality
        if results is None:
            return []
        objects = []
        for result in results:
            objects.append({'type': 'Celestial', 'id': result['MAIN_ID'], 'name': result['MAIN_ID'].replace(' ', ''),
                            'ra': result['RA'], 'dec': result['DEC']})
        return objects
    # search solar system small bodies using JPL HORIZONS
    @staticmethod
    def findSolarSystemObjects(keyword, observatory):
        """Resolve *keyword* against JPL HORIZONS (two passes: major
        bodies, then small bodies), then query ephemerides for tonight's
        window and return a list of match dicts with the RA/Dec at the
        time of maximum elevation. Unobservable objects are skipped."""
        # ch constants
        # max airmass
        max_airmass = 2.0  # 30 deg elevation
        objects = []
        # list of matches
        object_names = []
        # set to * to make the searches wider by default
        suffix = ''
        # two passes, one for major (and maybe small) and one for (only) small bodies
        lookups = [keyword + suffix, keyword + suffix + ';']
        for repeat in range(0, 2):
            # user JPL Horizons batch to find matches
            f = urllib.request.urlopen('https://ssd.jpl.nasa.gov/horizons_batch.cgi?batch=l&COMMAND="%s"' %
                                       urllib.request.quote(lookups[repeat].upper()))
            output = f.read().decode('utf-8')  # the whole enchilada
            # print output
            lines = output.splitlines()  # line by line
            # no matches? go home
            if re.search('No matches found', output):
                logger.debug('No matches found in JPL Horizons for %s.' %
                             lookups[repeat].upper())
            elif re.search('Target body name:', output):
                logger.debug('Single match found in JPL Horizons for %s.' %
                             lookups[repeat].upper().replace(suffix, ''))
                # just one match?
                # if major body search (repeat = 0), ignore small body results
                # if major body search, grab integer id
                if repeat == 0:
                    if re.search('Small-body perts:', output):
                        continue
                    match = re.search(
                        'Target body name:\\s[a-zA-Z]+\\s\\((\\d+)\\)', output)
                    if match:
                        object_names.append(match.group(1))
                    else:
                        logger.error('Error. Could not parse id for single match major body (%s).' %
                                     lookups[repeat].upper().replace(suffix, ''))
                else:
                    # user search term is unique, so use it!
                    object_names.append(
                        lookups[repeat].upper().replace(suffix, ''))
            elif repeat == 1 and re.search('Matching small-bodies', output):
                logger.info('Multiple small bodies found in JPL Horizons for %s.' %
                            lookups[repeat].upper())
                # Matching small-bodies:
                #
                #    Record #  Epoch-yr  Primary Desig  >MATCH NAME<
                #    --------  --------  -------------  -------------------------
                #        4     (undefined)     Vesta
                #    34366    2000 RP36    Rosavestal
                match_count = 0
                for line in lines:
                    search_string = line.strip()
                    # look for small body list
                    match = re.search('^-?\\d+', search_string)
                    # parse out the small body parameters
                    if match:
                        match_count += 1
                        record_number = line[0:12].strip()
                        epoch_yr = line[12:22].strip()
                        primary_desig = line[22:37].strip()
                        match_name = line[37:len(line)].strip()
                        # print record_number, epoch_yr, primary_desig, match_name
                        # add semicolon for small body lookups
                        object_names.append(record_number + ';')
                # check our parse job
                match = re.search('(\\d+) matches\\.', output)
                if match:
                    if int(match.group(1)) != match_count:
                        logger.error('Multiple JPL small body parsing error!')
                    else:
                        logger.info(
                            'Multiple JPL small body parsing successful!')
            elif repeat == 0 and re.search('Multiple major-bodies', output):
                logger.info('Multiple major bodies found in JPL Horizons for %s.' %
                            lookups[repeat].upper())
                # Multiple major-bodies match string "50*"
                #
                #  ID#      Name  Designation  IAU/aliases/other
                #  -------  ----------------------------------  -----------  -------------------
                #      501  Io  JI
                #      502  Europa  JII
                match_count = 0
                for line in lines:
                    search_string = line.strip()
                    # look for major body list
                    match = re.search('^-?\\d+', search_string)
                    # parse out the major body parameters
                    if match:
                        match_count += 1
                        record_number = line[0:9].strip()
                        # negative major bodies are spacecraft,etc. Skip those!
                        if int(record_number) >= 0:
                            name = line[9:45].strip()
                            designation = line[45:57].strip()
                            other = line[57:len(line)].strip()
                            # print record_number, name, designation, other
                            # NO semicolon for major body lookups
                            object_names.append(record_number)
                # check our parse job
                match = re.search('Number of matches =([\\s\\d]+).', output)
                if match:
                    if int(match.group(1)) != match_count:
                        logger.error('Multiple JPL major body parsing error!')
                    else:
                        logger.info(
                            'Multiple JPL major body parsing successful!')
        # get *nearest* sunset and *next* sunrise times
        # still not a big fan of this!
        observatory_location_obsplan = Observer(longitude=observatory.longitude*u.deg, latitude=observatory.latitude *
                                                u.deg, elevation=observatory.altitude*u.m, name=observatory.code, timezone=observatory.timezone)
        start = observatory_location_obsplan.twilight_evening_nautical(
            Time.now(), which="nearest")
        end = observatory_location_obsplan.twilight_morning_nautical(
            Time.now(), which="next")
        logger.debug('The nearest sunset is %s. The next sunrise is %s.' %
                     (start.iso, end.iso))
        logger.info('Found %d solar system match(es) for "%s".' %
                    (len(object_names), keyword))
        count = 0
        for object_name in object_names:
            count += 1
            # get ephemerides for target in JPL Horizons from start to end times
            result = ch.query(object_name.upper(), smallbody=True)
            result.set_epochrange(start.iso, end.iso, '15m')
            result.get_ephemerides(observatory.code)
            # return transit RA/DEC if available times exist
            logger.debug(result)
            if result and len(result['EL']):
                imax = np.argmax(result['EL'])
                ra = Angle(float(result['RA'][imax]) *
                           u.deg).to_string(unit=u.hour, sep=':')
                dec = Angle(float(result['DEC'][imax]) *
                            u.deg).to_string(unit=u.degree, sep=':')
                objects.append({'type': 'Solar System', 'id': object_name.upper(
                ), 'name': result['targetname'][0], 'ra': ra, 'dec': dec})
            else:
                logger.debug('The object ('+object_name+') is not observable.')
        return objects
#
# settings for a single set of astronomical images
#
class Stack():
    """Settings for one stack of astronomical images: exposure time,
    filter, binning, and the number of images to take."""
    exposure = 10  # exposure time in seconds
    filter = 'clear'  # filter, e.g., clear, h-alpha, u-band, g-band, r-band, i-band, z-band
    binning = 1  # binning, e.g. 1 or 2
    count = 1  # number of images in this stack
    do_pinpoint = True  # refine pointing in between images

    def __init__(self, exposure, filter, binning, count, do_pinpoint=True):
        self.exposure = exposure
        self.filter = filter
        self.binning = binning
        self.count = count
        self.do_pinpoint = do_pinpoint

    def toString(self):
        """Return a one-line, human-readable summary of the stack settings."""
        return (f'image stack: exposure={self.exposure:f}, '
                f'filter={self.filter}, binning={self.binning:d}, '
                f'count={self.count:d}, do_pinpoint={self.do_pinpoint}')
#
# sequence of astronomical image stacks
#
class Sequence():
    """An ordered collection of image Stacks, repeated a number of times."""
    stacks = []  # list of image stacks
    repeat = None  # number of times to repeat this sequence
    do_pinpoint = True  # refine pointing in between stacks
    # sentinel: repeat as much as possible
    CONTINUOUS = -1

    def __init__(self, stacks, repeat, do_pinpoint=True):
        self.stacks = stacks
        self.repeat = repeat
        self.do_pinpoint = do_pinpoint

    def addStack(self, stack):
        """Append one more image stack to the sequence."""
        self.stacks.append(stack)

    def toString(self):
        """Return a multi-line description: a header plus one indented
        line per stack."""
        pieces = ['sequence: repeat=%d, do_pinpoint=%s\n' %
                  (self.repeat, self.do_pinpoint)]
        pieces.extend(' %s\n' % entry.toString() for entry in self.stacks)
        return ''.join(pieces)

    # estimate the total duration in seconds of the observing sequence
    def getDuration(self):
        """Return the total exposure time in seconds, or -1 (with a
        warning) for a continuous sequence."""
        if self.repeat == -1:
            logger.warn('Sequence getDuration called on continuous sequence.')
            return -1
        total = sum(entry.exposure for entry in self.stacks) * self.repeat
        logger.debug('Sequence duration is %f seconds.' % total)
        return total
def get_lightcurve(telescope: 'Telescope') -> bool:
    """ Take a sequence of images for lightcurve studies

    Reads the observation description from ~<username>/lightcurve.json,
    builds the Observatory, Target, image Sequences and Observations for
    the main and calibration passes, and computes the target's
    observability window for tonight.

    Parameters
    ----------
    telescope: Telescope
        A connected and locked telescope object

    Returns
    -------
    res: bool
        True if image sequence was successful, False if otherwise
    """
    #lazy logger hack
    global logger
    logger = telescope.log
    #for now, read in image target, sequence, etc. from json file
    # NOTE(review): 'config', 'os' and 'json' are not among this file's
    # visible imports -- confirm they are imported elsewhere
    cfg_path = '/'.join(['', 'home', config.telescope.username,
                         'lightcurve.json'])
    if not os.path.isfile(cfg_path):
        logger.error(
            'Lightcurve configuration file (%s) not found.' % cfg_path)
        return False
    # load target and comparison observations
    with open(cfg_path) as f:
        cfg = json.load(f)
    # user, hardcode for now
    user = cfg['user']
    # min obs altitude
    min_obs_alt = float(cfg['min_obs_alt'])
    # seo
    observatory = Observatory(cfg['observatory']['code'], cfg['observatory']['latitude'], cfg['observatory']
                              ['longitude'], cfg['observatory']['altitude'], cfg['observatory']['timezone'])
    # pause time while waiting for object to become available
    delay_time = cfg['delay_time']
    # build main asteroid observation
    observation_json = cfg['observations']
    target_json = observation_json['target']
    sequence_json = observation_json['sequences']['main']
    stacks_json = sequence_json['stacks']
    # build target
    target = Target.from_name(
        target_json['name'], observatory, target_json['type'])
    logger.debug(target.toString().replace('\n', '; '))
    # build image stacks
    stacks = []
    for stack_json in stacks_json:
        # per-stack 'do_pinpoint' is optional and defaults to True
        stack = Stack(float(stack_json['exposure']), stack_json['filters'], int(
            stack_json['binning']), int(stack_json['count']), stack_json['do_pinpoint'] if 'do_pinpoint' in stack_json else True)
        logger.debug(stack.toString().replace('\n', '; '))
        stacks.append(stack)
    # build sequence
    sequence = Sequence(stacks, int(sequence_json['repeat']), sequence_json['do_pinpoint'] if 'do_pinpoint' in sequence_json else True)
    logger.debug(sequence.toString().replace('\n', '; '))
    # build main observations
    asteroid_main_observation = Observation(
        observatory, target, sequence, min_obs_alt, user)
    # get min, max, and max alt obs times
    asteroid_main_observation.getTimes()
    logger.debug(asteroid_main_observation.toString().replace('\n', '; '))
    # build calibration asteroid/star observations
    sequence_json = observation_json['sequences']['calibration']
    stacks_json = sequence_json['stacks']
    # build image stacks
    stacks = []
    for stack_json in stacks_json:
        stack = Stack(float(stack_json['exposure']), stack_json['filters'], int(
            stack_json['binning']), int(stack_json['count']), stack_json['do_pinpoint'] if 'do_pinpoint' in stack_json else True)
        logger.debug(stack.toString().replace('\n', '; '))
        stacks.append(stack)
    # build sequence
    sequence = Sequence(stacks, int(sequence_json['repeat']), sequence_json['do_pinpoint'] if 'do_pinpoint' in sequence_json else True)
    logger.debug(sequence.toString().replace('\n', '; '))
    # build calibration observations
    asteroid_calibration_observation = Observation(
        observatory, target, sequence, min_obs_alt, user)
    asteroid_calibration_observation_duration_s = sequence.getDuration()
    logger.debug(asteroid_calibration_observation.toString().replace('\n', '; '))
    # NOTE(review): no success return is visible at this point -- the
    # function appears to continue beyond this excerpt
|
yerkesobservatory/seo
|
routines/lightcurve.py
|
Python
|
gpl-3.0
| 21,037
|
[
"Galaxy"
] |
5f29cd3f007cee4d598bff6082d788c59caef38c91a6dad0856667efa49bde6c
|
# Copyright (c) 2018, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Peter Ogden, Parimal Patel"
__copyright__ = "Copyright 2018, Xilinx"
__email__ = "pynq_support@xilinx.com"
import cffi
import math
import numpy as np
from .constants import *
_ffi = cffi.FFI()
class DP159:
    """Class to configure the TI SNDP159 HDMI redriver/retimer
    """
    def __init__(self, master, address):
        """Construct a new driver
        Parameters
        ----------
        master : IIC master
            I2C master that the device is connected to
        address : int
            I2C address of device
        """
        self._master = master
        self._address = address
        self._buffer = _ffi.new("unsigned char [32]")
    def _read(self, reg_addr):
        """Read one byte from device register *reg_addr* over I2C."""
        self._buffer[0] = reg_addr
        self._master.send(self._address, self._buffer, 1, 1)
        self._master.receive(self._address, self._buffer, 1)
        self._master.wait()
        # Clear all of the interrupts
        # NOTE(review): 0x20 is presumably the IIC controller's interrupt
        # status register -- confirm against the master driver
        self._master.write(0x20, self._master.read(0x20))
        return self._buffer[0]
    def _write(self, reg_addr, data):
        """Write the byte *data* to device register *reg_addr* over I2C."""
        self._buffer[0] = reg_addr
        self._buffer[1] = data
        self._master.send(self._address, self._buffer, 2)
        self._master.wait()
        # Clear all of the interrupts
        self._master.write(0x20, self._master.read(0x20))
    def set_clock(self, refclk, line_rate):
        """Configure the device based on the line rate
        """
        # Line rates above 3.4 Gbps ("HDMI 2.0" rates, hence is20) take a
        # different register recipe below
        is20 = (line_rate // 1000000) > 3400
        # These parameters are derived from the Xilinx ZCU104 reference
        self._write(0x09, 0x06)
        if is20:
            self._write(0x0B, 0x9A)
            self._write(0x0C, 0x49)
            self._write(0x0D, 0x00)
            self._write(0x0A, 0x36)
        else:
            self._write(0x0B, 0x80)
            self._write(0x0C, 0x48)
            self._write(0x0D, 0x00)
            self._write(0x0A, 0x35)
def _get_int_div_table(fout, bypass):
    """Enumerate the feasible (total output divider, NS1) pairs for the
    requested output frequency *fout*.

    NS1 candidates depend on *bypass* (1 is only legal when NS2 is
    bypassed). A pair is kept when the implied VCO frequency
    ``fout * divider`` falls inside [IDT_8T49N24X_FVCO_MIN,
    IDT_8T49N24X_FVCO_MAX]. Returns a list of (divider, ns1) tuples.
    """
    ns1_choices = [1, 4, 5, 6] if bypass else [4, 5, 6]
    div_lo = math.ceil(IDT_8T49N24X_FVCO_MIN / fout)
    div_hi = math.floor(IDT_8T49N24X_FVCO_MAX / fout)
    if div_hi in ns1_choices or div_lo in ns1_choices:
        # NS1 alone can reach the range: bypass NS2 entirely
        ns2_lo = 0
        ns2_hi = 0
    else:
        ns2_lo = math.ceil(div_lo / ns1_choices[-1] / 2)
        ns2_hi = math.floor(div_hi / ns1_choices[0] / 2)
        if ns2_hi == 0:
            ns2_hi = 1
    entries = []
    for ns2 in range(ns2_lo, ns2_hi + 1):
        for ns1 in ns1_choices:
            # NS2 == 0 encodes the bypass case (divider is NS1 alone)
            total = ns1 if ns2 == 0 else ns1 * ns2 * 2
            vco = fout * total
            if IDT_8T49N24X_FVCO_MIN <= vco <= IDT_8T49N24X_FVCO_MAX:
                entries.append((total, ns1))
    return entries
# Map an NS1 divide ratio (4/5/6) to the register encoding used below
NS1Lookup = {4: 2, 5: 0, 6: 1}
def _calculate_settings(fin, fout):
    """Derive the IDT 8T49N24x divider, DSM and phase-detector settings
    that produce *fout* from the input frequency *fin*.

    Returns a dict with keys NS1Ratio/NS1_Reg/NS2Ratio/NS2_Reg/NInt/
    NFrac/DSMInt/DSMFrac/M1/Pre/LOS."""
    settings = {}
    # Largest feasible (total output divider, NS1) pair
    divide = max(_get_int_div_table(fout, False))
    fvco = fout * divide[0]
    settings['NS1Ratio'] = divide[1]
    settings['NS1_Reg'] = NS1Lookup[settings['NS1Ratio']]
    settings['NS2Ratio'] = divide[0] // divide[1]
    settings['NS2_Reg'] = settings['NS2Ratio'] // 2
    # Assume always integer division
    settings['NInt'] = divide[0] // 2
    settings['NFrac'] = 0
    # Calculate the divider from the reference crystal
    fbdiv = fvco / (2 * IDT_8T49N24X_XTAL_FREQ)
    settings['DSMInt'] = math.floor(fbdiv)
    # Fractional part is expressed in 1/2^21 units
    settings['DSMFrac'] = round((fbdiv - settings['DSMInt']) * pow(2, 21))
    # Calculate settings for the phase detector
    fin_ratio = fvco / fin
    PMin = fin // IDT_8T49N24X_FPD_MAX
    # NOTE(review): if fin < IDT_8T49N24X_FPD_MAX then PMin == 0 and the
    # first loop iteration divides by zero in (M1 / i) -- confirm inputs
    min_error = 99999999
    M1_best = 0
    P_best = 0
    for i in range(PMin, IDT_8T49N24X_P_MAX):
        M1 = round(i * fin_ratio)
        if M1 < IDT_8T49N24X_M_MAX:
            error = abs(fin_ratio - (M1 / i))
            if error < min_error:
                M1_best = M1
                P_best = i
                min_error = error
            # Close enough to exact: stop searching
            if error < 1e-9:
                break
        else:
            break
    settings['M1'] = M1_best
    settings['Pre'] = P_best
    # LOS value derived from the VCO/input ratio, floored at 6
    LOS = (fvco // 8 // fin) + 3
    if LOS < 6:
        LOS = 6
    settings['LOS'] = LOS
    return settings
class IDT_8T49N24:
    """Driver for the IDT 8T49N24x series of clock generators
    """
    def __init__(self, master, address):
        """Create a new instance of the IDT driver
        Parameters
        ----------
        master : IIC master
            IIC master the device is connected to
        address : int
            IIC address of the device
        Raises
        ------
        RuntimeError
            If the device ID read back does not identify an 8T49N24x
        """
        self._master = master
        self._address = address
        self._buffer = _ffi.new("unsigned char [32]")
        if not self.check_device_id():
            raise RuntimeError("Could not find IDT8TN24x")
        # Load the synthesizer register image with outputs disabled
        self.enable(False)
        self._configure(IDT_Synth)
        self.enable(True)
    def _configure(self, values):
        """Write the register image *values* (one byte per register index),
        skipping the calibration register at offset 0x70."""
        for i, v in enumerate(values):
            if i != 0x70:  # Skip Calibration
                self._write(i, v)
    def _read(self, reg_addr):
        """Read one byte from the 16-bit register address *reg_addr*,
        retrying the I2C transaction up to 100 times before re-raising."""
        attempts = 0
        while True:
            try:
                self._buffer[0] = reg_addr >> 8
                self._buffer[1] = reg_addr & 0xFF
                self._master.send(self._address, self._buffer, 2, 1)
                self._master.receive(self._address, self._buffer, 1, 0)
            except:
                attempts += 1
                if attempts > 100:
                    raise
                continue
            break
        return self._buffer[0]
    def _write(self, reg_addr, value):
        """Write the byte *value* to the 16-bit register address
        *reg_addr*, retrying the I2C transaction up to 100 times."""
        attempts = 0
        while True:
            try:
                self._buffer[0] = reg_addr >> 8
                self._buffer[1] = reg_addr & 0xFF
                self._buffer[2] = value
                self._master.send(self._address, self._buffer, 3, 0)
            except:
                attempts += 1
                if attempts > 100:
                    raise
                continue
            break
    def _update(self, reg_addr, value, mask):
        """Read-modify-write: replace the bits selected by *mask* with the
        corresponding bits of *value*."""
        data = self._read(reg_addr)
        data &= ~mask
        data |= (value & mask)
        self._write(reg_addr, data)
    def check_device_id(self):
        """Return True when the ID assembled from registers 0x02-0x04
        matches the expected part."""
        device_id = (self._read(0x0002) & 0xF) << 12
        device_id |= self._read(0x0003) << 4
        device_id |= self._read(0x0004) >> 4
        # NOTE(review): 65535 (all ones) also passes -- presumably to
        # tolerate a bus that reads back 0xFF; confirm this is intended
        return device_id == 0x0606 or device_id == 65535
    def enable(self, active):
        """Enable (active=True) or disable the device via bits 0 and 2 of
        register 0x70."""
        if active:
            value = 0x00
        else:
            value = 0x05
        self._update(0x0070, value, 0x05)
    def set_clock(self, freq, line_rate):
        """Configure the device based on the line rate
        The parameter `line_rate` is left to keep consistent API with
        other clock drivers.
        """
        self._set_clock(IDT_8T49N24X_XTAL_FREQ, freq, True)
    def _set_clock(self, fin, fout, free_run):
        """Program the full clock path to generate *fout* from *fin*;
        *free_run* selects the crystal instead of an external reference.
        Raises RuntimeError when either frequency is out of range."""
        if fin < IDT_8T49N24X_FIN_MIN:
            raise RuntimeError("Input Frequency Below Minimum")
        if fin > IDT_8T49N24X_FIN_MAX:
            raise RuntimeError("Input Frequency Above Maximum")
        if fout < IDT_8T49N24X_FOUT_MIN:
            raise RuntimeError("Output Frequency Below Minimum")
        if fout > IDT_8T49N24X_FOUT_MAX:
            raise RuntimeError("Output Frequency Above Maximum")
        settings = _calculate_settings(fin, fout)
        self.enable(False)
        if free_run:
            self._reference_input(0, False)
            self._reference_input(1, False)
            self._mode(True)
        else:
            self._reference_input(0, True)
            self._reference_input(1, False)
            self._mode(False)
        # Set up input clock
        self._pre_divider(0, settings['Pre'])
        self._pre_divider(1, settings['Pre'])
        self._m1_feedback(0, settings['M1'])
        self._m1_feedback(1, settings['M1'])
        self._los(0, settings['LOS'])
        self._los(1, settings['LOS'])
        # FVCO configuration
        self._dsm_int(settings['DSMInt'])
        self._dsm_frac(settings['DSMFrac'])
        # Output clock
        self._output_divider(2, settings['NInt'])
        self._output_divider(3, settings['NInt'])
        self._output_divider_frac(2, settings['NFrac'])
        self._output_divider_frac(3, settings['NFrac'])
        self.enable(True)
    def _reference_input(self, channel, enable):
        """Enable or disable reference input *channel* (0 or 1) via
        register 0x0A (bit 4 for channel 0, bit 5 for channel 1)."""
        if channel == 1:
            shift = 5
        else:
            shift = 4
        if enable:
            value = 0
        else:
            value = 1 << shift
        mask = 1 << shift
        self._update(0x000a, value, mask)
    def _mode(self, free_run):
        """Select free-run mode (True) or external-reference mode (False)
        via registers 0x0A and 0x69."""
        if free_run:
            self._update(0x000a, 0x31, 0x33)
            self._update(0x0069, 0x08, 0x08)
        else:
            self._update(0x000a, 0x20, 0x33)
            self._update(0x0069, 0x00, 0x08)
    def _pre_divider(self, channel, value):
        """Program the 21-bit input pre-divider for *channel*, big-endian
        over three consecutive registers."""
        if channel == 1:
            address = 0x000e
        else:
            address = 0x000b
        self._write(address, (value >> 16) & 0x1F)
        self._write(address + 1, (value >> 8) & 0xFF)
        self._write(address + 2, value & 0xFF)
    def _m1_feedback(self, channel, value):
        """Program the 24-bit M1 feedback divider for *channel*."""
        if channel == 1:
            address = 0x0011
        else:
            address = 0x0014
        self._write(address, value >> 16)
        self._write(address + 1, (value >> 8) & 0xFF)
        self._write(address + 2, value & 0xFF)
    def _los(self, channel, value):
        """Program the LOS value for *channel* over three registers."""
        if channel == 1:
            address = 0x0074
        else:
            address = 0x0071
        self._write(address, value >> 16)
        self._write(address + 1, (value >> 8) & 0xFF)
        self._write(address + 2, value & 0xFF)
    def _dsm_int(self, value):
        """Program the 9-bit integer part of the feedback DSM value."""
        self._write(0x25, (value >> 8) & 0x01)
        self._write(0x26, value & 0xFF)
    def _dsm_frac(self, value):
        """Program the 21-bit fractional part of the feedback DSM value."""
        self._write(0x28, (value >> 16) & 0x1F)
        self._write(0x29, (value >> 8) & 0xFF)
        self._write(0x2a, value & 0xFF)
    def _output_divider(self, channel, value):
        """Program the 18-bit integer output divider for *channel* (0-3)."""
        addresses = [0x003f, 0x0042, 0x0045, 0x0048]
        address = addresses[channel]
        self._write(address, (value >> 16) & 0x3)
        self._write(address + 1, (value >> 8) & 0xFF)
        self._write(address + 2, value & 0xFF)
    def _output_divider_frac(self, channel, value):
        """Program the 28-bit fractional output divider for *channel*.
        NOTE(review): channel 0 maps to address 0x0000, which looks like a
        placeholder; only channels 2 and 3 are used by _set_clock."""
        addresses = [0x0000, 0x0057, 0x005b, 0x005f]
        address = addresses[channel]
        self._write(address, (value >> 24) & 0x0F)
        self._write(address + 1, (value >> 16) & 0xFF)
        self._write(address + 2, (value >> 8) & 0xFF)
        self._write(address + 3, value & 0xFF)
class SI_5324C:
    """Driver for the SI 5324C series of clock generators
    """
    def __init__(self, master, address):
        """Create a new instance of the SI_5324C driver
        Parameters
        ----------
        master : IIC master
            IIC master the device is connected to
        address : int
            IIC address of the device
        Raises
        ------
        RuntimeError
            If the device ID read back does not match an Si5324
        """
        self._master = master
        self._address = address
        self._buffer = _ffi.new("unsigned char [32]")
        if not self.check_device_id():
            raise RuntimeError("Could not find SI5324")
        # Scratch state for the divider search performed by
        # _calc_freq_settings and its helpers; vals holds the six
        # register-ready results filled in at the end of the search
        self.vals = [0 for _ in range(6)]
        self.n1_min = 1
        self.n1_max = 1
        self.n1_hs = 1
        self.nc_ls_min = 1
        self.nc_ls_max = 1
        self.nc_ls = 1
        self.n2_hs = 1
        self.n2_ls_min = 1
        self.n2_ls_max = 1
        self.n2_ls = 1
        self.n3_min = 1
        self.n3_max = 1
        self.n3 = 1
        self.best_n1_hs = 1
        self.best_nc_ls = 1
        self.best_n2_hs = 1
        self.best_n2_ls = 1
        self.best_n3 = 1
        self.fin = 1
        self.fout = 1
        self.fosc = 1
        self.best_fout_delta = 1
        self.best_fout = 1
        self.enable(False)
        self._configure()
        self.enable(True)
    def _configure(self):
        """Apply the fixed register bring-up sequence.
        NOTE(review): register meanings per the Si5324 register map --
        confirm against the datasheet before changing."""
        self._write(3, 0x15)
        self._write(4, 0x92)
        self._write(6, 0x2f)
        self._write(10, 0x08)
        self._write(11, 0x42)
        self._write(19, 0x23)
        self._write(137, 0x01)
    def _read(self, reg_addr):
        """Read one byte from register *reg_addr*, retrying the I2C
        transaction up to 100 times before raising RuntimeError."""
        attempts = 0
        while True:
            try:
                self._buffer[0] = reg_addr
                self._master.send(self._address, self._buffer, 1, 1)
                self._master.receive(self._address, self._buffer, 1, 0)
            except Exception:
                attempts += 1
                if attempts > 100:
                    raise RuntimeError(
                        "Timeout when reading from address {}".format(reg_addr))
                continue
            break
        return self._buffer[0]
    def _write(self, reg_addr, value):
        """Write the byte *value* to register *reg_addr*, retrying the I2C
        transaction up to 100 times before raising RuntimeError."""
        attempts = 0
        while True:
            try:
                self._buffer[0] = reg_addr
                self._buffer[1] = value
                self._master.send(self._address, self._buffer, 2, 0)
            except Exception:
                attempts += 1
                if attempts > 100:
                    raise RuntimeError(
                        "Timeout when writing to address {}".format(reg_addr))
                continue
            break
    def _update(self, reg_addr, value, mask):
        """Read-modify-write: replace the bits selected by *mask* with the
        corresponding bits of *value*."""
        data = self._read(reg_addr)
        data &= ~mask
        data |= (value & mask)
        self._write(reg_addr, data)
    def check_device_id(self):
        """Return True when registers 0x86/0x87 contain the expected
        Si5324 ID (0x0182)."""
        device_id = self._read(0x86) << 8
        device_id |= self._read(0x87)
        return device_id == 0x0182
    def enable(self, active):
        """Enable (active=True) or disable the device via bit 0 of
        register 0x0B."""
        if active:
            value = 0x00
        else:
            value = 0x01
        self._update(0x0B, value, 0x01)
    def set_clock(self, freq, line_rate):
        """Configure the device to output *freq* from the crystal.
        The line_rate parameter is unused; it keeps the API consistent
        with the other clock drivers in this module."""
        self.enable(False)
        self._set_clock(SI5324_CLKSRC_XTAL, SI5324_XTAL_FREQ, freq)
        self.enable(True)
    def _rate_approx(self, f):
        """Approximate the Q28 fixed-point value *f* as a fraction
        n2_ls / n3 with n3 <= n3_max, storing the result in self.n2_ls
        and self.n3. The h/k arrays hold successive convergents.
        NOTE(review): this looks like a continued-fraction rational
        approximation ported from a C reference -- confirm against the
        vendor algorithm."""
        h = np.array([0, 1, 0])
        k = np.array([1, 0, 0])
        n = 1
        if self.n3_max <= 1:
            # Denominator forced to 1: take the integer part of the
            # Q28 value
            self.n3 = 1
            self.n2_ls = f >> 28
            return
        n = n << 28
        # Cancel common factors of two between numerator and denominator
        for i in range(0, 28):
            if (f % 2) == 0:
                n = n//2
                f = f//2
            else:
                break
        d = f
        for i in range(64):
            if n:
                a = d//n
            else:
                a = 0
            if i and not a:
                break
            x = d
            d = n
            n = x % n
            x = a
            # Cap the denominator at n3_max
            if k[1]*a+k[0] >= self.n3_max:
                x = (self.n3_max-k[0])//k[1]
                if not (x*2 >= a or k[1] >= self.n3_max):
                    break
            h[2] = x*h[1]+h[0]
            h[0] = h[1]
            h[1] = h[2]
            k[2] = x*k[1]+k[0]
            k[0] = k[1]
            k[1] = k[2]
        self.n3 = k[1]
        self.n2_ls = h[1]
    def _find_n2ls(self):
        """Search for an N2_LS/N3 pair consistent with the current n2_hs
        candidate, tracking the best-so-far solution in the self.best_*
        fields. Returns 1 when an exact frequency match is found, else 0."""
        result = 0
        np.seterr(divide='ignore', invalid='ignore')
        n2_ls_div_n3 = self.fosc//(self.fin >> 28)//self.n2_hs//2
        self._rate_approx(n2_ls_div_n3)
        self.n2_ls = self.n2_ls*2
        if self.n2_ls < self.n2_ls_min:
            # Scale the pair up into the legal N2_LS range
            # NOTE(review): '%' looks suspicious here -- a ceiling divide
            # (n2_ls_min // n2_ls, rounded up) seems intended; confirm
            mult = self.n2_ls_min % self.n2_ls
            if mult == 1:
                mult = mult+1
            self.n2_ls = self.n2_ls*mult
            self.n3 = self.n3*mult
        if self.n3 < self.n3_min:
            mult = self.n3_min % self.n3
            if mult == 1:
                mult = mult+1
            self.n2_ls = self.n2_ls*mult
            self.n3 = self.n3*mult
        else:
            # Candidate in range: evaluate the actual output frequency
            # (all quantities below are in Q28 fixed point)
            f3_actual = self.fin//self.n3
            fosc_actual = f3_actual * self.n2_hs * self.n2_ls
            fout_actual = fosc_actual//(self.n1_hs * self.nc_ls)
            delta_fout = fout_actual - self.fout
            # Reject candidates outside the device's F3/FOSC/FOUT limits
            if f3_actual < (SI5324_F3_MIN << 28) or \
                    f3_actual > (SI5324_F3_MAX << 28):
                pass
            elif fosc_actual < (SI5324_FOSC_MIN << 28) or \
                    fosc_actual > (SI5324_FOSC_MAX << 28):
                pass
            elif fout_actual < (SI5324_FOUT_MIN << 28) or \
                    fout_actual > (SI5324_FOUT_MAX << 28):
                pass
            else:
                if abs(delta_fout) < self.best_fout_delta:
                    self.best_n1_hs = self.n1_hs
                    self.best_nc_ls = self.nc_ls
                    self.best_n2_hs = self.n2_hs
                    self.best_n2_ls = self.n2_ls
                    self.best_n3 = self.n3
                    self.best_fout = fout_actual
                    self.best_fout_delta = abs(delta_fout)
                    if delta_fout == 0:
                        result = 1
        return result
    def _find_n2(self):
        """Scan N2_HS from high to low, deriving the legal N2_LS range
        for each and delegating to _find_n2ls. Returns 1 on exact match."""
        result = 0
        for i in range(SI5324_N2_HS_MAX, SI5324_N2_HS_MIN-1, -1):
            self.n2_hs = i
            self.n2_ls_min = self.fosc//((SI5324_F3_MAX * i) << 28)
            if self.n2_ls_min < SI5324_N2_LS_MIN:
                self.n2_ls_min = SI5324_N2_LS_MIN
            self.n2_ls_max = self.fosc//((SI5324_F3_MIN * i) << 28)
            if self.n2_ls_max > SI5324_N2_LS_MAX:
                self.n2_ls_max = SI5324_N2_LS_MAX
            result = self._find_n2ls()
            if result:
                break
        return result
    def _calc_ncls_limits(self):
        """Derive the legal NC_LS range for the current n1_hs candidate.
        Returns 0 on success, -1 when no NC_LS can satisfy the N1 range."""
        self.nc_ls_min = self.n1_min//self.n1_hs
        if self.nc_ls_min < SI5324_NC_LS_MIN:
            self.nc_ls_min = SI5324_NC_LS_MIN
        # NC_LS must be 1 or even: round odd bounds inward
        if self.nc_ls_min > 1 and self.nc_ls_min & 0x1 == 1:
            self.nc_ls_min = self.nc_ls_min+1
        self.nc_ls_max = self.n1_max//self.n1_hs
        if self.nc_ls_max > SI5324_NC_LS_MAX:
            self.nc_ls_max = SI5324_NC_LS_MAX
        if self.nc_ls_max & 0x1 == 1:
            self.nc_ls_max = self.nc_ls_max-1
        if self.nc_ls_max * self.n1_hs < self.n1_min or \
                self.nc_ls_min * self.n1_hs > self.n1_max:
            return -1
        return 0
    def _find_ncls(self):
        """Try NC_LS candidates, computing FOSC for each and delegating
        to _find_n2. Returns 1 on exact match.
        NOTE(review): range(nc_ls_max, nc_ls_max+1) iterates exactly
        once, and the trailing nc_ls reassignment is overwritten by the
        loop variable -- a scan from nc_ls_min to nc_ls_max in steps of
        two looks intended; confirm against the reference code."""
        fosc_1 = self.fout * self.n1_hs
        result = 0
        for i in range(self.nc_ls_max, self.nc_ls_max+1):
            self.fosc = fosc_1 * i
            self.nc_ls = i
            result = self._find_n2()
            if result:
                break
            if i == 1:
                self.nc_ls = i+1
            else:
                self.nc_ls = i+2
        return result
    def _calc_freq_settings(self, clk_in_freq, clk_out_freq):
        """Search the divider space for settings producing *clk_out_freq*
        from *clk_in_freq*; fill self.vals with the six register-ready
        values. Returns SI5324_SUCCESS or SI5234_ERR_FREQ."""
        # Work in Q28 fixed point throughout the search
        self.fin = clk_in_freq << 28
        self.fout = clk_out_freq << 28
        best_delta_fout = self.fout
        self.n1_min = SI5324_FOSC_MIN//clk_out_freq
        if self.n1_min < SI5324_N1_HS_MIN * SI5324_NC_LS_MIN:
            self.n1_min = SI5324_N1_HS_MIN * SI5324_NC_LS_MIN
        self.n1_max = SI5324_FOSC_MAX//clk_out_freq
        if self.n1_max > SI5324_N1_HS_MAX * SI5324_NC_LS_MAX:
            self.n1_max = SI5324_N1_HS_MAX * SI5324_NC_LS_MAX
        self.n3_min = clk_in_freq//SI5324_F3_MAX
        if self.n3_min < SI5324_N3_MIN:
            self.n3_min = SI5324_N3_MIN
        self.n3_max = clk_in_freq//SI5324_F3_MIN
        if self.n3_max > SI5324_N3_MAX:
            self.n3_max = SI5324_N3_MAX
        for i in range(SI5324_N1_HS_MAX, SI5324_N1_HS_MIN-1, -1):
            self.n1_hs = i
            result = self._calc_ncls_limits()
            if result:
                continue
            result = self._find_ncls()
            if result:
                break
        # NOTE(review): this condition compares best_delta_fout with
        # best_delta_fout//self.fout and never consults the search
        # outcome; it looks like it should test whether any candidate
        # improved on the initial delta -- confirm intent
        if best_delta_fout == best_delta_fout//self.fout:
            return SI5234_ERR_FREQ
        self.vals[0] = self.best_n1_hs-4
        self.vals[1] = self.best_nc_ls-1
        self.vals[2] = self.best_n2_hs-4
        self.vals[3] = self.best_n2_ls-1
        self.vals[4] = self.best_n3-1
        self.vals[5] = 6
        return SI5324_SUCCESS
    def _set_clock(self, clk_src, clk_in_freq, clk_out_freq):
        """Validate the request, compute divider settings, and stream the
        resulting (register, value) pairs to the device.
        Raises RuntimeError on invalid input or when no settings exist."""
        # buf holds interleaved (register address, value) byte pairs
        buf = np.zeros(30, dtype=np.uint8)
        if clk_src < SI5324_CLKSRC_CLK1 or clk_src > SI5324_CLKSRC_XTAL:
            raise RuntimeError("Si5324 Error : Incorrect input clock selected")
        if clk_src == SI5324_CLKSRC_CLK2:
            raise RuntimeError("Si5324 Error : clock input 2 not supported")
        if clk_in_freq < SI5324_FIN_MIN or clk_in_freq > SI5324_FIN_MAX:
            raise RuntimeError("Si5324 Error :Input Frequency out of range")
        if clk_out_freq < SI5324_FOUT_MIN or clk_out_freq > SI5324_FOUT_MAX:
            raise RuntimeError("Si5324 ERROR: Output frequency out of range")
        result = self._calc_freq_settings(clk_in_freq, clk_out_freq)
        if result != SI5324_SUCCESS:
            raise RuntimeError("Si5324 ERROR: Could not determine settings "
                               "for requested frequency")
        i = 0
        buf[i] = 0
        if clk_src == SI5324_CLKSRC_XTAL:
            buf[i+1] = 0x54
        else:
            buf[i+1] = 0x14
        i = i+2
        buf[i] = 2
        buf[i+1] = (self.vals[5] << 4) | 0x02
        i += 2
        buf[i] = 11
        if clk_src == SI5324_CLKSRC_CLK1:
            buf[i+1] = 0x42
        else:
            buf[i+1] = 0x41
        i += 2
        buf[i] = 13
        buf[i+1] = 0x2f
        i += 2
        buf[i] = 25
        buf[i+1] = self.vals[0] << 5
        i += 2
        # Registers 31-33: vals[1] (NC_LS) split big-endian over 3 bytes
        buf[i] = 31
        buf[i+1] = (self.vals[1] & 0x000F0000) >> 16
        buf[i+2] = 32
        buf[i+3] = (self.vals[1] & 0x0000FF00) >> 8
        buf[i+4] = 33
        buf[i+5] = self.vals[1] & 0x000000FF
        i += 6
        # Registers 40-42: vals[2] (N2_HS) packed with vals[3] (N2_LS)
        buf[i] = 40
        buf[i+1] = self.vals[2] << 5
        temp = (self.vals[3] & 0x000F0000) >> 16
        buf[i+1] = buf[i+1] | temp
        buf[i+2] = 41
        buf[i+3] = (self.vals[3] & 0x0000FF00) >> 8
        buf[i+4] = 42
        buf[i+5] = self.vals[3] & 0x000000FF
        i += 6
        # Registers 43-45 or 46-48 depending on the clock source:
        # vals[4] (N3) split big-endian over 3 bytes
        if clk_src == SI5324_CLKSRC_CLK1:
            buf[i] = 43
            buf[i+2] = 44
            buf[i+4] = 45
        else:
            buf[i] = 46
            buf[i+2] = 47
            buf[i+4] = 48
        buf[i+1] = (self.vals[4] & 0x00070000) >> 16
        buf[i+3] = (self.vals[4] & 0x0000FF00) >> 8
        buf[i+5] = self.vals[4] & 0x000000FF
        i += 6
        # NOTE(review): presumably register 136 starts the internal
        # self-calibration (ICAL) -- confirm with the datasheet
        buf[i] = 136
        buf[i+1] = 0x40
        i += 2
        # Sanity check: every (register, value) slot must have been filled
        if i != buf.shape[0]:
            return
        for index in range(0, i, 2):
            reg_addr = buf[index]
            data = buf[index+1]
            self._write(reg_addr, data)
        return result
|
Xilinx/PYNQ
|
pynq/lib/video/clocks.py
|
Python
|
bsd-3-clause
| 24,085
|
[
"CRYSTAL"
] |
d25d103ff18229328242fbec3ead9b13e28e57a03266913f2c7efc1e23226721
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def main():
    """Render a single line segment from (1, 0, 0) to (0, 1, 0) using VTK."""
    # Endpoints of the line segment.
    start_point = [1.0, 0.0, 0.0]
    end_point = [0.0, 1.0, 0.0]

    line_source = vtk.vtkLineSource()
    line_source.SetPoint1(start_point)
    line_source.SetPoint2(end_point)

    # Map the line's polydata into renderable geometry.
    named_colors = vtk.vtkNamedColors()
    line_mapper = vtk.vtkPolyDataMapper()
    line_mapper.SetInputConnection(line_source.GetOutputPort())

    line_actor = vtk.vtkActor()
    line_actor.SetMapper(line_mapper)
    line_actor.GetProperty().SetLineWidth(4)
    line_actor.GetProperty().SetColor(named_colors.GetColor3d("Peacock"))

    scene = vtk.vtkRenderer()
    scene.SetBackground(named_colors.GetColor3d("Silver"))
    scene.AddActor(line_actor)

    window = vtk.vtkRenderWindow()
    window.SetWindowName("Line")
    window.AddRenderer(scene)

    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(window)

    window.Render()
    interactor.Start()


if __name__ == '__main__':
    main()
|
lorensen/VTKExamples
|
src/Python/GeometricObjects/Line.py
|
Python
|
apache-2.0
| 1,017
|
[
"VTK"
] |
9f0c44f71bb2233434f84e213768c9001b0d0214b3d18acda5ef13226bfa490b
|
#!/usr/bin/env python
# coding: utf-8
# Example utilizing the **`LFPykit`** module (https://lfpykit.readthedocs.io,
# https://github.com/LFPy/LFPykit) for predictions of extracellular
# potentials using the line source approximation implementation
# `LineSourcePotential` with a passive neuron model set up in Arbor
# (https://arbor.readthedocs.io, https://github.com/arbor-sim/arbor).
#
# The neuron receives sinusoid synaptic current input in one arbitrary
# chosen control volume (CV).
# Its morphology is defined in the file `single_cell_detailed.swc`
# import modules
import sys
import numpy as np
import arbor
import lfpykit
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.collections import PolyCollection
import pandas as pd
class Recipe (arbor.recipe):
    """Single-cell Arbor recipe exposing voltage, total-current and
    stimulus-current probes for one cable cell."""

    def __init__(self, cell):
        super().__init__()
        self.the_cell = cell
        # (gid, probe index) handles matching the list returned by probes().
        self.vprobe_id = (0, 0)
        self.iprobe_id = (0, 1)
        self.cprobe_id = (0, 2)
        # Default cable properties plus the default mechanism catalogue.
        self.the_props = arbor.neuron_cable_properties()
        self.the_cat = arbor.default_catalogue()
        self.the_props.register(self.the_cat)

    def num_cells(self):
        return 1

    def num_sources(self, gid):
        return 0

    def cell_kind(self, gid):
        return arbor.cell_kind.cable

    def cell_description(self, gid):
        return self.the_cell

    def global_properties(self, kind):
        return self.the_props

    def probes(self, gid):
        # Whole-cell probes: membrane voltage, total transmembrane current,
        # and stimulus current, one value per CV.
        return [
            arbor.cable_probe_membrane_voltage_cell(),
            arbor.cable_probe_total_current_cell(),
            arbor.cable_probe_stimulus_current_cell()
        ]
# Read the SWC filename from input
# Example from docs: single_cell_detailed.swc
if len(sys.argv) < 2:
    print("No SWC file passed to the program")
    sys.exit(0)
filename = sys.argv[1]
# define morphology (needed for arbor.place_pwlin)
morphology = arbor.load_swc_arbor(filename)
# number of CVs per branch
nseg = 3
# Label dictionary (empty: no named regions/locsets are needed here)
defs = {}
labels = arbor.label_dict(defs)
# decor
decor = arbor.decor()
# set initial voltage, temperature, axial resistivity, membrane capacitance
decor.set_property(
    Vm=-65,  # Initial membrane voltage [mV]
    tempK=300,  # Temperature [Kelvin]
    rL=10000,  # Axial resistivity [Ω cm]
    cm=0.01,  # Membrane capacitance [F/m**2]
)
# set passive mechanism all over
# passive mech w. leak reversal potential (mV)
pas = arbor.mechanism('pas/e=-65')
pas.set('g', 0.0001)  # leak conductivity (S/cm2)
decor.paint('(all)', arbor.density(pas))
# set sinusoid input current at mid point of terminating CV (segment)
iclamp = arbor.iclamp(5,  # stimulation onset (ms)
                      1E8,  # stimulation duration (ms)
                      -0.001,  # stimulation amplitude (nA)
                      frequency=0.1,  # stimulation frequency (kHz)
                      phase=0)  # stimulation phase)
try:
    # arbor >= 0.5.2 fix: place() takes a label argument
    decor.place('(location 4 0.16667)', iclamp, '"iclamp"')
except TypeError:
    decor.place('(location 4 0.16667)', iclamp)
# number of CVs per branch
policy = arbor.cv_policy_fixed_per_branch(nseg)
decor.discretization(policy)
# create cell and set properties
cell = arbor.cable_cell(morphology, labels, decor)
# create single cell model
model = arbor.single_cell_model(cell)
# instantiate recipe with cell
recipe = Recipe(cell)
# instantiate simulation
context = arbor.context()
domains = arbor.partition_load_balance(recipe, context)
sim = arbor.simulation(recipe, domains, context)
# set up sampling on probes (sample every 0.1 ms, exact sampling policy)
schedule = arbor.regular_schedule(0.1)
v_handle = sim.sample(recipe.vprobe_id, schedule, arbor.sampling_policy.exact)
i_handle = sim.sample(recipe.iprobe_id, schedule, arbor.sampling_policy.exact)
c_handle = sim.sample(recipe.cprobe_id, schedule, arbor.sampling_policy.exact)
# run simulation for 500 ms of simulated activity and collect results.
sim.run(tfinal=500)
# extract time, V_m and I_m for each compartment
V_m_samples, V_m_meta = sim.samples(v_handle)[0]
I_m_samples, I_m_meta = sim.samples(i_handle)[0]
I_c_samples, I_c_meta = sim.samples(c_handle)[0]
# drop recorded V_m values and corresponding meta data of
# zero-sized CVs (branch-point potentials)
inds = np.array([m.dist != m.prox for m in V_m_meta])
V_m_samples = V_m_samples[:, np.r_[True, inds]]  # column 0 is the time axis
V_m_meta = np.array(V_m_meta)[inds].tolist()
# note: the cables comprising the metadata for each probe
# should be the same, as well as the reported sample times.
assert V_m_meta == I_m_meta
assert (V_m_samples[:, 0] == I_m_samples[:, 0]).all()
# prep recorded data for plotting (transpose: one row per CV)
time = V_m_samples[:, 0]
V_m = V_m_samples[:, 1:].T
I_m = I_m_samples[:, 1:].T
I_c = I_c_samples[:, 1:].T
# gather geometry of CVs and assign segments to each CV
p = arbor.place_pwlin(morphology)
x, y, z, d = [np.array([], dtype=float).reshape((0, 2))] * 4
CV_ind = np.array([], dtype=int)  # tracks which CV owns segment
for i, m in enumerate(I_m_meta):
    segs = p.segments([m])
    for j, seg in enumerate(segs):
        x = np.row_stack([x, [seg.prox.x, seg.dist.x]])
        y = np.row_stack([y, [seg.prox.y, seg.dist.y]])
        z = np.row_stack([z, [seg.prox.z, seg.dist.z]])
        d = np.row_stack([d, [seg.prox.radius * 2, seg.dist.radius * 2]])
        CV_ind = np.r_[CV_ind, i]
###############################################################################
# compute extracellular potential using segment information
###############################################################################
cell_geometry = lfpykit.CellGeometry(
    x=x,
    y=y,
    z=z,
    d=d
)
# membrane voltages, transmembrane current and corresponding times
cell_geometry.V_m = V_m  # mV
# nA, sum stimulation and transmembrane current to mimic sinusoid synapse
cell_geometry.I_m = I_m + I_c
cell_geometry.time = time  # ms
# locations where extracellular potential is predicted
dx = 1
dz = 1
axis = np.round([x.min() - 10, x.max() + 10, y.min() - 10, y.max() + 10])
# axis = np.round(axis)
X, Y = np.meshgrid(np.linspace(axis[0], axis[1], int(np.diff(axis[:2]) // dx) + 1),
                   np.linspace(axis[2], axis[3], int(np.diff(axis[2:]) // dz) + 1))
Z = np.zeros_like(X)
# LineSourcePotential object, get mapping for all segments per CV
lsp = lfpykit.LineSourcePotential(cell=cell_geometry,
                                  x=X.flatten(),
                                  y=Y.flatten(),
                                  z=Z.flatten())
M_tmp = lsp.get_transformation_matrix()
# Define response matrix from M with columns weighted by area of each frusta
M = np.zeros((lsp.x.size, I_m.shape[0]))
for i in range(I_m.shape[0]):
    inds = CV_ind == i
    M[:, i] = M_tmp[:, inds] @ (cell_geometry.area[inds] /
                                cell_geometry.area[inds].sum())
# Extracellular potential using segment information at last time step
# in x,y-plane coordinates
V_e = M @ cell_geometry.I_m[:, -1]
# ## Plotting
# Plot the morphology and extracellular potential prediction
def create_polygon(x, y, d):
    """Create an outline polygon for a segment chain defined by 1D arrays
    `x`, `y`, `d` in the x,y-plane, drawable with `plt.Polygon`/`PolyCollection`.

    Walks one side of the chain offsetting each point by half the local
    diameter perpendicular to the segment direction, then walks back along
    the other side.

    Parameters
    ----------
    x: ndarray
    y: ndarray
    d: ndarray
        diameters at each point

    Returns
    -------
    list of (x, y) tuples tracing the closed outline
    """
    # calculate angles of each segment in the x,y-plane
    dx = abs(np.diff(x))
    dy = np.diff(y)
    theta = np.arctan2(dy, dx)
    # duplicate the chain reversed to trace the far side on the way back
    x = np.r_[x, x[::-1]]
    y = np.r_[y, y[::-1]]
    theta = np.r_[theta, theta[::-1]]
    d = np.r_[d, d[::-1]]
    # 1st corner:
    x[0] -= 0.5 * d[0] * np.sin(theta[0])
    y[0] += 0.5 * d[0] * np.cos(theta[0])
    # points between start and end of section, first side
    # (offset by the mean normal of the two adjoining segments)
    x[1:dx.size] -= 0.25 * d[1:dx.size] * (
        np.sin(theta[:dx.size - 1]) + np.sin(theta[1:dx.size]))
    # NOTE(review): dx.size and dy.size indices are mixed below; they are
    # always equal here (both from np.diff of same-length arrays) — confirm
    # before changing either.
    y[1:dy.size] += 0.25 * d[1:dy.size] * (
        np.cos(theta[:dy.size - 1]) + np.cos(theta[1:dx.size]))
    # end of section, first side
    x[dx.size] -= 0.5 * d[dx.size] * np.sin(theta[dx.size])
    y[dy.size] += 0.5 * d[dy.size] * np.cos(theta[dy.size])
    # end of section, second side
    x[dx.size + 1] += 0.5 * d[dx.size + 1] * np.sin(theta[dx.size])
    y[dy.size + 1] -= 0.5 * d[dy.size + 1] * np.cos(theta[dy.size])
    # points between start and end of section, second side
    x[::-1][1:dx.size] += 0.25 * d[::-1][1:dx.size] * (
        np.sin(theta[::-1][:dx.size - 1]) + np.sin(theta[::-1][1:dx.size]))
    y[::-1][1:dy.size] -= 0.25 * d[::-1][1:dy.size] * (
        np.cos(theta[::-1][:dy.size - 1]) + np.cos(theta[::-1][1:dx.size]))
    # last corner:
    x[-1] += 0.5 * d[-1] * np.sin(theta[-1])
    y[-1] -= 0.5 * d[-1] * np.cos(theta[-1])
    return list(zip(x, y))
def colorbar(fig, ax, im,
             width=0.01,
             height=1.0,
             hoffset=0.01,
             voffset=0.0,
             orientation='vertical'):
    '''
    Draw a matplotlib colorbar next to ``ax`` without resizing the axes
    object itself.

    Parameters
    ----------
    fig: matplotlib Figure the colorbar axes is added to
    ax: Axes the colorbar is positioned relative to
    im: mappable (image/collection) the colorbar describes
    width, height: colorbar size as fractions of the ``ax`` bounds
    hoffset, voffset: colorbar offset as fractions of the ``ax`` bounds
    orientation: 'vertical' or 'horizontal'

    Returns
    -------
    cb: the created Colorbar instance
    '''
    # Position of ax as [left, bottom, width, height] in figure coordinates.
    # (The original computed this twice; once is enough.)
    rect = np.array(ax.get_position().bounds)
    # Place the colorbar axes just outside ax, scaled by width/height.
    caxrect = [0] * 4
    caxrect[0] = rect[0] + rect[2] + hoffset * rect[2]
    caxrect[1] = rect[1] + voffset * rect[3]
    caxrect[2] = rect[2] * width
    caxrect[3] = rect[3] * height
    cax = fig.add_axes(caxrect)
    cb = fig.colorbar(im, cax=cax, orientation=orientation)
    return cb
fig, ax = plt.subplots(1, 1, figsize=(12, 4))
# plot pcolormesh plot of V_e (symmetric diverging color scale around 0)
im_V_e = ax.pcolormesh(X, Y, V_e.reshape(X.shape),
                       shading='auto', cmap='RdBu',
                       vmin=-abs(V_e).max() / 2, vmax=abs(V_e).max() / 2,
                       zorder=0)
cb = colorbar(fig, ax, im_V_e, height=0.45, voffset=0.55)
cb.set_label('$V_e$ (mV)')
# add outline of each CV, colored by its membrane voltage at the last sample
norm = plt.Normalize(vmin=-66, vmax=-64)
colors = [plt.cm.viridis(norm(v)) for v in cell_geometry.V_m[:, -1]]
zips = []
for i in range(I_m.shape[0]):
    inds = CV_ind == i
    zips.append(create_polygon(x[inds, ].flatten(),
                               y[inds, ].flatten(), d[inds, ].flatten()))
polycol = PolyCollection(zips,
                         edgecolors='k',
                         facecolors=colors,
                         linewidths=0.5,
                         zorder=2)
im_V_m = ax.add_collection(polycol)
cb2 = colorbar(fig, ax, im_V_m, height=0.45)
# tick positions are in normalized [0, 1]; labels map back to mV
cb2.set_ticks([0, 0.5, 1])
cb2.set_ticklabels([-66, -65, -64])
cb2.set_label(r'$V_m$ (mV)')
ax.set_xlim(X.min(), X.max())
ax.set_ylim(Y.min(), Y.max())
ax.set_xlabel(r'$x$ ($\mu$m)')
ax.set_ylabel(r'$y$ ($\mu$m)')
fig.savefig('single_cell_extracellular_potentials.svg', bbox_inches='tight')
# ## Notes on output:
# The spatial discretization is here deliberately coarse with only 3 CVs per branch.
# Hence the branch receiving input about 1/6 of the way from its root
# (from `decor.place('(location 4 0.16667)', iclamp)`) is treated
# as 3 separate line sources with homogeneous current density per length unit each, even if
# the diameter and direction varies with position due to the fact
# that each CV may be composed of multiple segments.
#
# The parameter `nseg = 3` above can be changed above to affect the number
# of CVs per branch.
|
halfflat/nestmc-proto
|
python/example/single_cell_extracellular_potentials.py
|
Python
|
bsd-3-clause
| 11,127
|
[
"NEURON"
] |
ef1c610ec20c3660e6bcf3c0092562af6c7260b77ac6513d0fe7738d9ce95eab
|
import lldb
max_depth = 6
#filters = {'_view': 'UIView *', '_layer': 'CALayer *', '_viewFlags': 'struct'}
filters = {'_view': 'UIView *'}
def print_value(var, depth, prefix):
    """Print one lldb SBValue and recurse into its children.

    var: lldb.SBValue to print
    depth: remaining recursion budget (local depth is max_depth - depth)
    prefix: "i/N" sibling-position label printed before the node
    """
    global max_depth
    local_depth = max_depth - depth
    pad = ' ' * local_depth
    name = var.GetName()
    # First line of the type text, stripped of body / bitfield decorations.
    typ = str(var.GetType()).split('\n')[0].split('{')[0].split(':')[0].strip()
    # Children of a filtered name are visited deeply only when the type
    # matches too.  (Idiom fix: 'in filters' instead of 'in filters.keys()',
    # folded the two-step check into one boolean.)
    found = name in filters and filters.get(name) == typ
    value = var.GetValue()
    if value is None or str(value) == '0x00000000':
        value = ''
    else:
        value = ' Val: %s' % value
    if var.GetNumChildren() == 0 and var.IsInScope():
        path = lldb.SBStream()
        var.GetExpressionPath(path)
        path = ' pathData: %s' % path.GetData()
    else:
        path = ''
    print('^' * local_depth, prefix, ' Adr:', var.GetAddress(), ' Name:', name, ' Type:', typ, value, path)
    if var.GetNumChildren() > 0:
        # Recurse only near the root, or under an explicitly filtered subtree.
        if local_depth < 2 or found:
            print(pad, var.GetNumChildren(), 'children, to depth', local_depth + 1)
            # enumerate replaces the hand-rolled counter of the original.
            for counter, subvar in enumerate(var):
                subprefix = '%d/%d' % (counter, var.GetNumChildren())
                print_value(subvar, depth - 1, subprefix)
def prect(debugger, command_line, result, dict):
    """ print rect dimensions """
    print("Rect dimensions")
    args = command_line.split()
    # Guard clause: nothing to look up without a variable name.
    if not args:
        return
    var = lldb.frame.FindVariable(args[0])
    print(var.GetName())
    print(var.GetValue())
def printvh(debugger, command_line, result, dict):
    """ print view hierarchy """
    print("View hierarchy:")
    global max_depth
    args = command_line.split()
    # Guard clause: require at least a variable name.
    if not args:
        print("pass a variable name and optional depth")
        return
    var = lldb.frame.FindVariable(args[0])
    # Optional second argument overrides the recursion depth.
    depth = int(args[1]) if len(args) > 1 else max_depth
    max_depth = depth
    print_value(var, depth, 'ROOT')
|
mauricerkelly/dotfiles
|
development/lldbpy.symlink/views.py
|
Python
|
mit
| 2,085
|
[
"VisIt"
] |
a41d70be4a7031a1655d284dd7113b73573341aff4ec44973703b2b0db2fcc42
|
"""
SystemLoggingHandler is the implementation of the Logging service
in the DISET framework.
The following methods are available in the Service interface::
addMessages()
"""
__RCSID__ = "$Id$"
from types import ListType, StringTypes
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.FrameworkSystem.private.logging.Message import tupleToMessage
from DIRAC.FrameworkSystem.DB.SystemLoggingDB import SystemLoggingDB
# This is a global instance of the SystemLoggingDB class
gLogDB = False
def initializeSystemLoggingHandler( serviceInfo ):
    """ Check that we can connect to the DB and that the tables are properly created or updated
    """
    global gLogDB
    gLogDB = SystemLoggingDB()
    connResult = gLogDB._connect()
    if not connResult['OK']:
        return connResult
    tableResult = gLogDB._checkTable()
    # An already-existing table is fine; any other failure is propagated.
    if not tableResult['OK'] and tableResult['Message'] != 'The requested table already exist':
        return tableResult
    return S_OK()
class SystemLoggingHandler( RequestHandler ):
    """ Service-side handler that stores incoming log messages in the
    SystemLoggingDB (via the module-level gLogDB instance).
    """

    def __addMessage( self, messageObject, site, nodeFQDN ):
        """
        Add a single Message to the log database.

        messageObject: Message instance to store
        site: site name the message originates from
        nodeFQDN: fully qualified host name of the sender
        """
        credentials = self.getRemoteCredentials()
        # dict.has_key() was removed in Python 3; dict.get() with a default
        # is equivalent and works on Python 2 as well.
        userDN = credentials.get( 'DN', 'unknown' )
        userGroup = credentials.get( 'group', 'unknown' )
        remoteAddress = self.getRemoteAddress()[0]
        return gLogDB.insertMessage( messageObject, site, nodeFQDN, userDN, userGroup, remoteAddress )

    types_addMessages = [ ListType, StringTypes, StringTypes ]

    #A normal exported function (begins with export_)
    def export_addMessages( self, messagesList, site, nodeFQDN ):
        """
        Service entry point: store a list of message tuples.

        Inputs:
            messagesList contains a list of Message tuples.
            site and nodeFQDN identify the sender.
        Outputs:
            S_OK if every message was inserted
            S_ERROR on the first insertion failure
        """
        for messageTuple in messagesList:
            messageObject = tupleToMessage( messageTuple )
            result = self.__addMessage( messageObject, site, nodeFQDN )
            if not result['OK']:
                gLogger.error( 'The Log Message could not be inserted into the DB',
                               'because: "%s"' % result['Message'] )
                return S_ERROR( result['Message'] )
        return S_OK()
|
vmendez/DIRAC
|
FrameworkSystem/Service/SystemLoggingHandler.py
|
Python
|
gpl-3.0
| 2,488
|
[
"DIRAC"
] |
52d158b7f0081f06142812f68c8d4dce11a2d77223174cf8bd7198f405ae5f7d
|
import json
import py
class DummyS3Connection(object):
    """In-memory stand-in for an S3 connection, backed by directories
    under /tmp."""
    _temp_path = py.path.local('/tmp')

    def __init__(self, **kwargs):
        # Accept (and ignore) the same keyword arguments as a real connection.
        pass

    def get_bucket(self, name):
        """Return a bucket object for ``name`` rooted under the temp path."""
        return DummyS3Bucket(name=name, path=self._temp_path.join(name))

    def get_all_buckets(self):
        """Yield a DummyS3Bucket for every directory under the temp path."""
        for entry in self._temp_path.listdir():
            if entry.isdir():
                yield DummyS3Bucket(name=entry.basename, path=entry)

    def __iter__(self):
        return self.get_all_buckets()
class LoggingStatus(object):
    """Mimics boto's bucket logging-status object.

    The ``LoggingEnabled`` attribute exists only when a target or prefix
    was supplied, so callers can probe for it with hasattr().
    """

    def __init__(self, **kwargs):
        self.target = kwargs.get('target')
        self.prefix = kwargs.get('prefix')
        enabled = self.target is not None or self.prefix is not None
        if enabled:
            self.LoggingEnabled = ''
class DummyResult(object):
    """Simple attribute bag built from arbitrary keyword arguments."""

    def __init__(self, **kwargs):
        for name in kwargs:
            setattr(self, name, kwargs[name])
class DummyS3Bucket(object):
    """Filesystem-backed stand-in for a boto S3 bucket."""

    def __init__(self, **kwargs):
        self.name = kwargs.get('name')
        self.path = kwargs.get('path')
        # Logging configuration is read once from a hidden JSON file.
        self.logging_status = LoggingStatus(**self._read_logging_status())

    def _read_logging_status(self):
        """Return the parsed '.logging_status.json' dict, or {} if absent."""
        status_file = self.path.join('.logging_status.json')
        if status_file.exists():
            return json.loads(status_file.read())
        return {}

    def get_logging_status(self):
        return self.logging_status

    def list(self, prefix=None):
        return self.get_all_keys(prefix)

    def get_all_keys(self, prefix=None):
        """Yield a DummyS3Key for every file under the bucket directory,
        optionally restricted to key names starting with ``prefix``."""
        for entry in self.path.visit():
            if entry.isdir():
                continue
            entry.common(self.path)
            keyname = '/'.join([self.path.basename, entry.basename])
            if prefix is not None and not keyname.startswith(prefix):
                continue
            yield DummyS3Key(entry, bucket=self, name=keyname)

    def delete_keys(self, keys):
        """Delete every key and report the deletions boto-style."""
        result = DummyResult(deleted=[], errors=[])
        for key in keys:
            key.delete()
            result.deleted.append(DummyResult(key=key.name))
        return result
class DummyS3Key(object):
    """Filesystem-backed stand-in for a boto S3 key."""

    def __init__(self, filename, bucket=None, name=None):
        # Normalize plain string paths to py.path.local objects.
        if isinstance(filename, py.path.local):
            self.filename = filename
        else:
            self.filename = py.path.local(filename)
        self.bucket = bucket
        self.name = name

    def get_contents_as_string(self):
        """Return the backing file's contents."""
        return self.filename.read()

    def delete(self):
        """Remove the backing file."""
        self.filename.remove()
|
nocarryr/s3-logparser
|
tests/utils.py
|
Python
|
gpl-3.0
| 2,444
|
[
"VisIt"
] |
cda5007175744a556420a61e31a28509466430b8b204df0d3a6125412be100f2
|
import collect_array as ca
import collect_transformation_info as cti
from processing import collect_device as cd
class FindLoopArrays(object):
    """Collects loop-array references from an AST via ca.ArrayNameToRef."""

    def __init__(self):
        self.loop_arrays = dict()
        self.loop_arrays_parent = dict()

    def collect(self, ast):
        """Visit ``ast`` and record loop arrays and their parent nodes."""
        visitor = ca.ArrayNameToRef()
        visitor.visit(ast)
        self.loop_arrays = visitor.LoopArrays
        self.loop_arrays_parent = visitor.LoopArraysParent
class FindKernelName(cti.FindArrayIdsKernel):
    """Derives kernel and device-side naming conventions from the AST."""

    def __init__(self):
        super(FindKernelName, self).__init__()
        self.KernelName = None
        self.DevId = dict()
        self.DevFuncId = None
        self.DevFuncTypeId = None
        self.DevArgList = list()
        self.Mem = dict()
        self.Worksize = dict()

    def collect(self, ast):
        """Gather device arguments, the device function and the naming maps."""
        super(FindKernelName, self).collect(ast)

        arg_finder = cd.FindDeviceArgs()
        arg_finder.visit(ast)
        self.DevArgList = arg_finder.arglist

        func_finder = cd.FindFunction()
        func_finder.visit(ast)
        self.DevFuncTypeId = func_finder.typeid
        base_name = self.DevFuncTypeId.name.name
        self.DevFuncId = base_name

        # Conventional prefixes for device pointers and host-side sizes.
        for array_name in self.ArrayIds:
            self.DevId[array_name] = 'dev_ptr' + array_name
            self.Mem[array_name] = 'hst_ptr' + array_name + '_mem_size'

        self.KernelName = base_name + 'Kernel'
        self.Worksize['local'] = base_name + '_local_worksize'
        self.Worksize['global'] = base_name + '_global_worksize'
        self.Worksize['offset'] = base_name + '_global_offset'
|
dikujepsen/OpenTran
|
v2.0/framework/unused/collect_boilerplate_info_unused.py
|
Python
|
mit
| 1,612
|
[
"VisIt"
] |
938ea0ede095daa9d14a501a6ec1ae419e938c126fbb0cf9ebb5b3648e689a0a
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska and the SALib team
This class holds the Fourier Amplitude Sensitivity Test (FAST) based on Cukier et al. (1973) and Saltelli et al. (1999):
Cukier, R. I., Fortuin, C. M., Shuler, K. E., Petschek, A. G. and Schaibly, J. H.: Study of the sensitivity of coupled reaction systems to uncertainties in rate coefficients. I Theory, J. Chem. Phys., 59(8), 3873–3878, 1973.
Saltelli, A., Tarantola, S. and Chan, K. P.-S.: A Quantitative Model-Independent Method for Global Sensitivity Analysis of Model Output, Technometrics, 41(1), 39–56, doi:10.1080/00401706.1999.10485594, 1999.
The presented code is based on SALib
Copyright (C) 2013-2015 Jon Herman and others. Licensed under the GNU Lesser General Public License.
The Sensitivity Analysis Library is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
The Sensitivity Analysis Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with the Sensitivity Analysis Library. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import _algorithm
import numpy as np
import time
import math
class fast(_algorithm):
    '''
    Implements the Fourier Amplitude Sensitivity Test (FAST) algorithm.

    Input
    ----------
    spot_setup: class
        model: function
            Should be callable with a parameter combination of the parameter-function
            and return a list of simulation results (as long as evaluation list)
        parameter: function
            When called, it should return a random parameter combination. Which can
            be e.g. uniform or Gaussian
        objectivefunction: function
            Should return the objectivefunction for a given list of a model simulation and
            observation.
        evaluation: function
            Should return the true values as returned by the model.
    dbname: str
        * Name of the database where parameter, objectivefunction value and simulation results will be saved.
    dbformat: str
        * ram: fast suited for short sampling time. no file will be created and results are saved in an array.
        * csv: A csv file will be created, which you can import afterwards.
    parallel: str
        * seq: Sequential sampling (default): Normal iterations on one core of your cpu.
        * mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).
    save_sim: boolean
        * True: Simulation results will be saved
        * False: Simulation results will not be saved
    '''

    def __init__(self, spot_setup, dbname=None, dbformat=None, parallel='seq', save_sim=True):
        # Delegate all setup (database, parallelism, model wiring) to the base class.
        _algorithm.__init__(self, spot_setup, dbname=dbname,
                            dbformat=dbformat, parallel=parallel, save_sim=save_sim)

    def scale_samples(self, params, bounds):
        '''
        Rescales samples in 0-to-1 range to arbitrary bounds, in place.

        Arguments:
            bounds - list of lists of dimensions num_params-by-2
            params - numpy array of dimensions num_params-by-N,
                     where N is the number of samples
        '''
        # Check bounds are legal (upper bound is greater than lower bound)
        b = np.array(bounds)
        lower_bounds = b[:, 0]
        upper_bounds = b[:, 1]
        if np.any(lower_bounds >= upper_bounds):
            raise ValueError("Bounds are not legal")
        # This scales the samples in-place, by using the optional output
        # argument for the numpy ufunctions
        # The calculation is equivalent to:
        #     sample * (upper_bound - lower_bound) + lower_bound
        np.add(np.multiply(params,
                           (upper_bounds - lower_bounds),
                           out=params),
               lower_bounds,
               out=params)

    def matrix(self, bounds, N, M=4):
        # Build the N*D-by-D FAST sampling matrix following Saltelli et al. (1999).
        # bounds: per-parameter [min, max]; N: samples per parameter; M: interference factor.
        D = len(bounds)
        omega = np.empty([D])
        # Highest driving frequency for the parameter of interest.
        omega[0] = math.floor((N - 1) / (2 * M))
        m = math.floor(omega[0] / (2 * M))
        if m >= (D - 1):
            omega[1:] = np.floor(np.linspace(1, m, D - 1))
        else:
            # Not enough distinct low frequencies: reuse them cyclically.
            omega[1:] = np.arange(D - 1) % m + 1
        # Discretization of the frequency space, s
        s = (2 * math.pi / N) * np.arange(N)
        # Transformation to get points in the X space
        X = np.empty([N * D, D])
        omega2 = np.empty([D])
        for i in range(D):
            # Parameter i gets the high frequency; all others the low ones.
            omega2[i] = omega[0]
            idx = list(range(i)) + list(range(i + 1, D))
            omega2[idx] = omega[1:]
            l = range(i * N, (i + 1) * N)
            # random phase shift on [0, 2pi) following Saltelli et al.
            # Technometrics 1999
            phi = 2 * math.pi * np.random.rand()
            for j in range(D):
                g = 0.5 + (1 / math.pi) * \
                    np.arcsin(np.sin(omega2[j] * s + phi))
                X[l, j] = g
        self.scale_samples(X, bounds)
        return X

    def analyze(self, problem, Y, D, parnames, M=4, print_to_console=False):
        # Compute first-order (S1) and total-order (ST) sensitivity indices
        # from the model output vector Y produced by a matrix() sample.
        # ``problem`` is accepted for interface compatibility but unused here.
        if len(Y.shape) > 1:
            Y = Y.flatten()
        print(Y.size)
        if Y.size % (D) == 0:
            N = int(Y.size / D)
        else:
            print("""
                Error: Number of samples in model output file must be a multiple of D,
                where D is the number of parameters in your parameter file.
              """)
            exit()
        # Recreate the vector omega used in the sampling
        omega = np.empty([D])
        omega[0] = math.floor((N - 1) / (2 * M))
        m = math.floor(omega[0] / (2 * M))
        if m >= (D - 1):
            omega[1:] = np.floor(np.linspace(1, m, D - 1))
        else:
            omega[1:] = np.arange(D - 1) % m + 1
        # Calculate and Output the First and Total Order Values
        if print_to_console:
            print("Parameter First Total")
        Si = dict((k, [None] * D) for k in ['S1', 'ST'])
        for i in range(D):
            # Slice of Y corresponding to parameter i's sampling block.
            l = np.arange(i * N, (i + 1) * N)
            Si['S1'][i] = self.compute_first_order(Y[l], N, M, omega[0])
            Si['ST'][i] = self.compute_total_order(Y[l], N, omega[0])
            if print_to_console:
                print("%s %f %f" %
                      (parnames[i], Si['S1'][i], Si['ST'][i]))
        return Si

    def compute_first_order(self, outputs, N, M, omega):
        # First-order index: spectral power at the driving frequency omega
        # (and its first M harmonics) over the total variance.
        f = np.fft.fft(outputs)
        Sp = np.power(np.absolute(f[np.arange(1, int(N / 2))]) / N, 2)
        V = 2 * np.sum(Sp)
        D1 = 2 * np.sum(Sp[np.arange(1, M + 1) * int(omega) - 1])
        return D1 / V

    def compute_total_order(self, outputs, N, omega):
        # Total-order index: 1 minus the fraction of variance below omega/2
        # (i.e. variance NOT attributable to the parameter of interest).
        f = np.fft.fft(outputs)
        Sp = np.power(np.absolute(f[np.arange(1, int(N / 2))]) / N, 2)
        V = 2 * np.sum(Sp)
        Dt = 2 * sum(Sp[np.arange(int(omega / 2))])
        return (1 - Dt / V)

    def sample(self, repetitions):
        """
        Samples from the FAST algorithm.

        Input
        ----------
        repetitions: int
            Maximum number of runs.
        """
        print('Creating FAST Matrix')
        # Get the names of the parameters to analyse
        names = self.parameter()['name']
        # Get the minimum and maximum value for each parameter from the
        # distribution
        parmin, parmax = self.parameter()['minbound'], self.parameter()[
            'maxbound']
        # Create an Matrix to store the parameter sets
        N = int(math.ceil(float(repetitions) / float(len(parmin))))
        bounds = []
        for i in range(len(parmin)):
            bounds.append([parmin[i], parmax[i]])
        Matrix = self.matrix(bounds, N, M=4)
        print('Start sampling')
        starttime = time.time()
        intervaltime = starttime
        # A generator that produces the parameters
        #param_generator = iter(Matrix)
        firstcall = True
        param_generator = (
            (rep, Matrix[rep]) for rep in range(len(Matrix)))
        for rep, randompar, simulations in self.repeat(param_generator):
            # Calculate the objective function
            like = self.objectivefunction(
                evaluation=self.evaluation, simulation=simulations)
            self.status(rep, like, randompar)
            # Lazily create the database on the first result so the column
            # layout can be derived from an actual simulation.
            if firstcall == True:
                self.initialize_database(randompar, self.parameter()['name'], simulations, like)
                firstcall = False
            # Save everything in the database
            self.datawriter.save(like, randompar, simulations=simulations)
            # Progress bar
            acttime = time.time()
            # Refresh progressbar every second
            if acttime - intervaltime >= 2:
                text = '%i of %i (best like=%g)' % (
                    rep, len(Matrix), self.status.objectivefunction)
                print(text)
                intervaltime = time.time()
        self.repeat.terminate()
        text = '%i of %i (best like=%g)' % (
            self.status.rep, repetitions, self.status.objectivefunction)
        print(text)
        text = 'Duration:' + str(round((acttime - starttime), 2)) + ' s'
        print(text)
        try:
            self.datawriter.finalize()
            data = self.datawriter.getdata()
            # this is likely to crash if database does not assign name 'like1'
            Si = self.analyze(
                bounds, data['like1'], len(bounds), names, print_to_console=True)
        except AttributeError:  # Happens if no database was assigned
            pass
|
p-lauer/spotpy
|
spotpy/algorithms/fast.py
|
Python
|
mit
| 10,126
|
[
"Gaussian"
] |
558403072ca91268ccf319825efd95ea609115832c73c04f871d79c044d5b516
|
import unittest
import scipy
import pysal
import numpy as np
from pysal.spreg import error_sp as SP
class TestBaseGMError(unittest.TestCase):
    """Regression test pinning BaseGM_Error results on the Columbus dataset."""

    def setUp(self):
        # HOVAL regressed on a constant, INC and CRIME with row-standardized
        # rook contiguity weights.
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # Expected values below are previously-verified reference outputs;
        # comparisons use 4 decimal places.
        reg = SP.BaseGM_Error(self.y, self.X, self.w.sparse)
        betas = np.array([[ 47.94371455], [ 0.70598088], [ -0.55571746], [ 0.37230161]])
        np.testing.assert_array_almost_equal(reg.betas,betas,4)
        u = np.array([ 27.4739775])
        np.testing.assert_array_almost_equal(reg.u[0],u,4)
        predy = np.array([ 52.9930255])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,4)
        n = 49
        self.assertAlmostEqual(reg.n,n,4)
        k = 3
        self.assertAlmostEqual(reg.k,k,4)
        y = np.array([ 80.467003])
        np.testing.assert_array_almost_equal(reg.y[0],y,4)
        x = np.array([  1.     ,  19.531  ,  15.72598])
        np.testing.assert_array_almost_equal(reg.x[0],x,4)
        e = np.array([ 31.89620319])
        np.testing.assert_array_almost_equal(reg.e_filtered[0],e,4)
        predy = np.array([ 52.9930255])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,4)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y,my)
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y,sy)
        vm = np.array([[  1.51884943e+02,  -5.37622793e+00,  -1.86970286e+00], [ -5.37622793e+00,   2.48972661e-01,   5.26564244e-02], [ -1.86970286e+00,   5.26564244e-02,   3.18930650e-02]])
        np.testing.assert_array_almost_equal(reg.vm,vm,4)
        sig2 = 191.73716465732355
        self.assertAlmostEqual(reg.sig2,sig2,4)
class TestGMError(unittest.TestCase):
    """Regression test pinning GM_Error results (user-facing class, which
    adds the constant and reports inference statistics itself)."""

    def setUp(self):
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # Expected values are previously-verified reference outputs;
        # comparisons use 4 decimal places.
        reg = SP.GM_Error(self.y, self.X, self.w)
        betas = np.array([[ 47.94371455], [ 0.70598088], [ -0.55571746], [ 0.37230161]])
        np.testing.assert_array_almost_equal(reg.betas,betas,4)
        u = np.array([ 27.4739775])
        np.testing.assert_array_almost_equal(reg.u[0],u,4)
        predy = np.array([ 52.9930255])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,4)
        n = 49
        self.assertAlmostEqual(reg.n,n,4)
        k = 3
        self.assertAlmostEqual(reg.k,k,4)
        y = np.array([ 80.467003])
        np.testing.assert_array_almost_equal(reg.y[0],y,4)
        x = np.array([  1.     ,  19.531  ,  15.72598])
        np.testing.assert_array_almost_equal(reg.x[0],x,4)
        e = np.array([ 31.89620319])
        np.testing.assert_array_almost_equal(reg.e_filtered[0],e,4)
        predy = np.array([ 52.9930255])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,4)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y,my)
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y,sy)
        vm = np.array([[  1.51884943e+02,  -5.37622793e+00,  -1.86970286e+00], [ -5.37622793e+00,   2.48972661e-01,   5.26564244e-02], [ -1.86970286e+00,   5.26564244e-02,   3.18930650e-02]])
        np.testing.assert_array_almost_equal(reg.vm,vm,4)
        sig2 = 191.73716465732355
        self.assertAlmostEqual(reg.sig2,sig2,4)
        pr2 = 0.3495097406012179
        self.assertAlmostEqual(reg.pr2,pr2)
        std_err = np.array([ 12.32416094,   0.4989716 ,   0.1785863 ])
        np.testing.assert_array_almost_equal(reg.std_err,std_err,4)
        z_stat = np.array([[  3.89022140e+00,   1.00152805e-04], [  1.41487186e+00,   1.57106070e-01], [ -3.11175868e+00,   1.85976455e-03]])
        np.testing.assert_array_almost_equal(reg.z_stat,z_stat,4)
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
        "Maximum Likelihood requires SciPy version 11 or newer.")
class TestBaseGMEndogError(unittest.TestCase):
    """Regression test pinning BaseGM_Endog_Error results: CRIME treated as
    endogenous with DISCBD as instrument."""

    def setUp(self):
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        self.X = np.array(X).T
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        # Endogenous regressor and its instrument.
        yd = []
        yd.append(db.by_col("CRIME"))
        self.yd = np.array(yd).T
        q = []
        q.append(db.by_col("DISCBD"))
        self.q = np.array(q).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # Expected values are previously-verified reference outputs;
        # comparisons use 4 decimal places.
        reg = SP.BaseGM_Endog_Error(self.y, self.X, self.yd, self.q, self.w.sparse)
        betas = np.array([[ 55.36095292], [ 0.46411479], [ -0.66883535], [ 0.38989939]])
        np.testing.assert_array_almost_equal(reg.betas,betas,4)
        u = np.array([ 26.55951566])
        np.testing.assert_array_almost_equal(reg.u[0],u,4)
        e = np.array([ 31.23925425])
        np.testing.assert_array_almost_equal(reg.e_filtered[0],e,4)
        predy = np.array([ 53.9074875])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,4)
        n = 49
        self.assertAlmostEqual(reg.n,n)
        k = 3
        self.assertAlmostEqual(reg.k,k)
        y = np.array([ 80.467003])
        np.testing.assert_array_almost_equal(reg.y[0],y,4)
        x = np.array([  1.   ,  19.531])
        np.testing.assert_array_almost_equal(reg.x[0],x,4)
        yend = np.array([ 15.72598])
        np.testing.assert_array_almost_equal(reg.yend[0],yend,4)
        z = np.array([  1.     ,  19.531  ,  15.72598])
        np.testing.assert_array_almost_equal(reg.z[0],z,4)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y,my)
        #std_y
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y,sy)
        #vm
        vm = np.array([[  5.29158422e+02,  -1.57833675e+01,  -8.38021080e+00],
       [ -1.57833675e+01,   5.40235041e-01,   2.31120327e-01],
       [ -8.38021080e+00,   2.31120327e-01,   1.44977385e-01]])
        np.testing.assert_array_almost_equal(reg.vm,vm,4)
        sig2 = 192.50022721929574
        self.assertAlmostEqual(reg.sig2,sig2,4)
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
                 "Maximum Likelihood requires SciPy version 11 or newer.")
class TestGMEndogError(unittest.TestCase):
    """Regression tests for the user-facing GM error model with an
    endogenous regressor (CRIME) instrumented by DISCBD."""

    def setUp(self):
        # Columbus sample data; the user-level class adds the constant
        # itself, so X holds only INC here.
        db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
        hoval = np.array(db.by_col("HOVAL"))
        self.y = hoval.reshape((49, 1))
        self.X = np.array([db.by_col("INC")]).T
        self.yd = np.array([db.by_col("CRIME")]).T
        self.q = np.array([db.by_col("DISCBD")]).T
        # Row-standardized rook contiguity weights.
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # Fit and compare every published attribute against reference values.
        reg = SP.GM_Endog_Error(self.y, self.X, self.yd, self.q, self.w)
        np.testing.assert_array_almost_equal(
            reg.betas,
            np.array([[55.36095292], [0.46411479], [-0.66883535], [0.38989939]]), 4)
        np.testing.assert_array_almost_equal(reg.u[0], np.array([26.55951566]), 4)
        np.testing.assert_array_almost_equal(reg.e_filtered[0], np.array([31.23925425]), 4)
        np.testing.assert_array_almost_equal(reg.predy[0], np.array([53.9074875]), 4)
        self.assertAlmostEqual(reg.n, 49)
        self.assertAlmostEqual(reg.k, 3)
        np.testing.assert_array_almost_equal(reg.y[0], np.array([80.467003]), 4)
        np.testing.assert_array_almost_equal(reg.x[0], np.array([1., 19.531]), 4)
        np.testing.assert_array_almost_equal(reg.yend[0], np.array([15.72598]), 4)
        np.testing.assert_array_almost_equal(reg.z[0], np.array([1., 19.531, 15.72598]), 4)
        self.assertAlmostEqual(reg.mean_y, 38.43622446938776)
        self.assertAlmostEqual(reg.std_y, 18.466069465206047)
        np.testing.assert_array_almost_equal(
            reg.vm,
            np.array([[5.29158422e+02, -1.57833675e+01, -8.38021080e+00],
                      [-1.57833675e+01, 5.40235041e-01, 2.31120327e-01],
                      [-8.38021080e+00, 2.31120327e-01, 1.44977385e-01]]), 4)
        self.assertAlmostEqual(reg.pr2, 0.346472557570858)
        self.assertAlmostEqual(reg.sig2, 192.50022721929574, 4)
        np.testing.assert_array_almost_equal(
            reg.std_err, np.array([23.003401, 0.73500657, 0.38075777]), 4)
        np.testing.assert_array_almost_equal(
            reg.z_stat,
            np.array([[2.40664208, 0.01609994],
                      [0.63144305, 0.52775088],
                      [-1.75659016, 0.07898769]]), 4)
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
                 "Maximum Likelihood requires SciPy version 11 or newer.")
class TestBaseGMCombo(unittest.TestCase):
    """Regression tests for the low-level GM combo (lag + error) model."""

    def setUp(self):
        # Columbus sample data: HOVAL dependent; INC and CRIME exogenous.
        db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
        hoval = np.array(db.by_col("HOVAL"))
        self.y = hoval.reshape((49, 1))
        self.X = np.array([db.by_col("INC"), db.by_col("CRIME")]).T
        # Row-standardized rook contiguity weights.
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # Only spatial lag
        # Build the spatial-lag endogenous variable and its instruments
        # manually, then add the constant column before fitting.
        yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, 1, True)
        self.X = np.hstack((np.ones(self.y.shape), self.X))
        reg = SP.BaseGM_Combo(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse)
        np.testing.assert_array_almost_equal(
            reg.betas,
            np.array([[57.61123461], [0.73441314], [-0.59459416],
                      [-0.21762921], [0.54732051]]), 4)
        np.testing.assert_array_almost_equal(reg.u[0], np.array([25.57932637]), 4)
        np.testing.assert_array_almost_equal(reg.e_filtered[0], np.array([31.65374945]), 4)
        np.testing.assert_array_almost_equal(reg.predy[0], np.array([54.88767663]), 4)
        self.assertAlmostEqual(reg.n, 49)
        self.assertAlmostEqual(reg.k, 4)
        np.testing.assert_array_almost_equal(reg.y[0], np.array([80.467003]), 4)
        np.testing.assert_array_almost_equal(reg.x[0], np.array([1., 19.531, 15.72598]), 4)
        np.testing.assert_array_almost_equal(reg.yend[0], np.array([35.4585005]), 4)
        np.testing.assert_array_almost_equal(
            reg.z[0], np.array([1., 19.531, 15.72598, 35.4585005]), 4)
        self.assertAlmostEqual(reg.mean_y, 38.43622446938776)
        self.assertAlmostEqual(reg.std_y, 18.466069465206047)
        np.testing.assert_array_almost_equal(
            np.diag(reg.vm),
            np.array([5.22438365e+02, 2.38012873e-01, 3.20924172e-02,
                      2.15753599e-01]), 4)
        self.assertAlmostEqual(reg.sig2, 181.78650186468832, 4)
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
                 "Maximum Likelihood requires SciPy version 11 or newer.")
class TestGMCombo(unittest.TestCase):
    """Regression tests for the user-facing GM combo (lag + error) model."""

    def setUp(self):
        # Columbus sample data: HOVAL dependent; INC and CRIME exogenous.
        db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
        hoval = np.array(db.by_col("HOVAL"))
        self.y = hoval.reshape((49, 1))
        self.X = np.array([db.by_col("INC"), db.by_col("CRIME")]).T
        # Row-standardized rook contiguity weights.
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # Only spatial lag
        reg = SP.GM_Combo(self.y, self.X, w=self.w)
        # Reduced-form (spatial-multiplier) predictions and residuals.
        np.testing.assert_array_almost_equal(reg.e_pred[0], np.array([28.18617481]), 4)
        np.testing.assert_array_almost_equal(reg.predy_e[0], np.array([52.28082782]), 4)
        np.testing.assert_array_almost_equal(
            reg.betas,
            np.array([[57.61123515], [0.73441313], [-0.59459416],
                      [-0.21762921], [0.54732051]]), 4)
        np.testing.assert_array_almost_equal(reg.u[0], np.array([25.57932637]), 4)
        np.testing.assert_array_almost_equal(reg.e_filtered[0], np.array([31.65374945]), 4)
        np.testing.assert_array_almost_equal(reg.predy[0], np.array([54.88767685]), 4)
        self.assertAlmostEqual(reg.n, 49)
        self.assertAlmostEqual(reg.k, 4)
        np.testing.assert_array_almost_equal(reg.y[0], np.array([80.467003]), 4)
        np.testing.assert_array_almost_equal(reg.x[0], np.array([1., 19.531, 15.72598]), 4)
        np.testing.assert_array_almost_equal(reg.yend[0], np.array([35.4585005]), 4)
        np.testing.assert_array_almost_equal(
            reg.z[0], np.array([1., 19.531, 15.72598, 35.4585005]), 4)
        self.assertAlmostEqual(reg.mean_y, 38.43622446938776)
        self.assertAlmostEqual(reg.std_y, 18.466069465206047)
        np.testing.assert_array_almost_equal(
            np.diag(reg.vm),
            np.array([5.22438333e+02, 2.38012875e-01, 3.20924173e-02,
                      2.15753579e-01]), 4)
        self.assertAlmostEqual(reg.sig2, 181.78650186468832, 4)
        self.assertAlmostEqual(reg.pr2, 0.3018280166937799, 4)
        self.assertAlmostEqual(reg.pr2_e, 0.3561355586759414, 4)
        np.testing.assert_array_almost_equal(
            reg.std_err,
            np.array([22.85692222, 0.48786559, 0.17914356, 0.46449318]), 4)
        np.testing.assert_array_almost_equal(
            reg.z_stat,
            np.array([[2.52051597e+00, 1.17182922e-02],
                      [1.50535954e+00, 1.32231664e-01],
                      [-3.31909311e+00, 9.03103123e-04],
                      [-4.68530506e-01, 6.39405261e-01]]), 4)
if __name__ == '__main__':
    # Run the full test suite when this module is executed as a script.
    unittest.main()
|
spreg-git/pysal
|
pysal/spreg/tests/test_error_sp.py
|
Python
|
bsd-3-clause
| 14,705
|
[
"COLUMBUS"
] |
05cc8383f8633cd7d5bcbf280251d6ecc6200cd40a43d3e73012f69f59fe97fb
|
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array
from ..utils.validation import DataConversionWarning
from .base import BaseEnsemble, _partition_estimators
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
random_state = check_random_state(tree.random_state)
indices = random_state.randint(0, n_samples, n_samples)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
tree.indices_ = sample_counts > 0.
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
                                    _LearntSelectorMixin)):
    """Base class for forests of trees.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Estimator construction is delegated to BaseEnsemble; only the
        # forest-specific knobs are stored here.
        super(BaseForest, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)
        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.warm_start = warm_start

    def apply(self, X):
        """Apply trees in the forest to X, return leaf indices.

        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        X_leaves : array_like, shape = [n_samples, n_estimators]
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = check_array(X, dtype=DTYPE, accept_sparse="csr")
        # Each tree's apply() is dispatched through _parallel_helper so the
        # call can be pickled; the threading backend avoids copying X.
        results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                           backend="threading")(
            delayed(_parallel_helper)(tree.tree_, 'apply', X)
            for tree in self.estimators_)
        # results is [n_estimators, n_samples]; transpose to per-sample rows.
        return np.array(results).T

    def fit(self, X, y, sample_weight=None):
        """Build a forest of trees from the training set (X, y).

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.
        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        self : object
            Returns self.
        """
        # Convert data
        # ensure_2d=False because there are actually unit test checking we fail
        # for 1d. FIXME make this consistent in the future.
        X = check_array(X, dtype=DTYPE, ensure_2d=False, accept_sparse="csc")
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()

        # Remap output
        n_samples, self.n_features_ = X.shape

        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn("A column-vector y was passed when a 1d array was"
                 " expected. Please change the shape of y to "
                 "(n_samples, ), for example using ravel().",
                 DataConversionWarning, stacklevel=2)

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        # Subclasses may remap y (e.g. classifiers encode class labels here).
        y = self._validate_y(y)

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        # Check parameters
        self._validate_estimator()

        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")

        random_state = check_random_state(self.random_state)

        if not self.warm_start:
            # Free allocated memory, if any
            self.estimators_ = []

        n_more_estimators = self.n_estimators - len(self.estimators_)

        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'len(estimators_)=%d when warm_start==True'
                             % (self.n_estimators, len(self.estimators_)))

        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
        else:
            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))

            trees = []
            for i in range(n_more_estimators):
                # Seed each tree with an independent draw so the forest is
                # reproducible given self.random_state.
                tree = self._make_estimator(append=False)
                tree.set_params(random_state=random_state.randint(MAX_INT))
                trees.append(tree)

            # Parallel loop: we use the threading backend as the Cython code
            # for fitting the trees is internally releasing the Python GIL
            # making threading always more efficient than multiprocessing in
            # that case.
            trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(
                delayed(_parallel_build_trees)(
                    t, self, X, y, sample_weight, i, len(trees),
                    verbose=self.verbose)
                for i, t in enumerate(trees))

            # Collect newly grown trees
            self.estimators_.extend(trees)

        if self.oob_score:
            self._set_oob_score(X, y)

        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        return self

    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""

    def _validate_y(self, y):
        # Default implementation: targets are used as-is; classifiers
        # override this to encode class labels.
        return y

    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
           feature).

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError("Estimator not fitted, "
                             "call `fit` before `feature_importances_`.")

        # Average the per-tree importances; gathered in parallel since each
        # tree computes its importances lazily.
        all_importances = Parallel(n_jobs=self.n_jobs, backend="threading")(
            delayed(getattr)(tree, 'feature_importances_')
            for tree in self.estimators_)

        return sum(all_importances) / self.n_estimators
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
                                          ClassifierMixin)):
    """Base class for forest of trees-based classifiers.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        super(ForestClassifier, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

    def _set_oob_score(self, X, y):
        """Compute out-of-bag score"""
        n_classes_ = self.n_classes_
        n_samples = y.shape[0]

        oob_decision_function = []
        oob_score = 0.0
        # One probability accumulator per output, filled only from trees
        # that did NOT see the sample during fitting.
        predictions = []

        for k in range(self.n_outputs_):
            predictions.append(np.zeros((n_samples, n_classes_[k])))

        sample_indices = np.arange(n_samples)
        for estimator in self.estimators_:
            # estimator.indices_ marks in-bag samples (set during fitting);
            # invert it to get the out-of-bag mask for this tree.
            mask = np.ones(n_samples, dtype=np.bool)
            mask[estimator.indices_] = False
            mask_indices = sample_indices[mask]
            p_estimator = estimator.predict_proba(X[mask_indices, :])

            if self.n_outputs_ == 1:
                p_estimator = [p_estimator]

            for k in range(self.n_outputs_):
                predictions[k][mask_indices, :] += p_estimator[k]

        for k in range(self.n_outputs_):
            if (predictions[k].sum(axis=1) == 0).any():
                warn("Some inputs do not have OOB scores. "
                     "This probably means too few trees were used "
                     "to compute any reliable oob estimates.")

            # Normalize accumulated probabilities to a decision function.
            decision = (predictions[k] /
                        predictions[k].sum(axis=1)[:, np.newaxis])
            oob_decision_function.append(decision)
            oob_score += np.mean(y[:, k] ==
                                 np.argmax(predictions[k], axis=1), axis=0)

        if self.n_outputs_ == 1:
            self.oob_decision_function_ = oob_decision_function[0]
        else:
            self.oob_decision_function_ = oob_decision_function

        # Mean accuracy across outputs.
        self.oob_score_ = oob_score / self.n_outputs_

    def _validate_y(self, y):
        # Encode each output column to integer class indices and record
        # the per-output class labels/counts.
        y = np.copy(y)

        self.classes_ = []
        self.n_classes_ = []

        for k in range(self.n_outputs_):
            classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes_k)
            self.n_classes_.append(classes_k.shape[0])

        return y

    def predict(self, X):
        """Predict class for X.

        The predicted class of an input sample is computed as the majority
        prediction of the trees in the forest.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes.
        """
        # ensure_2d=False because there are actually unit test checking we fail
        # for 1d.
        X = check_array(X, ensure_2d=False, accept_sparse="csr")
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            # Map the argmax class index back to the original label.
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)

        else:
            n_samples = proba[0].shape[0]
            predictions = np.zeros((n_samples, self.n_outputs_))

            for k in range(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
                                                                    axis=1),
                                                          axis=0)

            return predictions

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the mean predicted class probabilities of the trees in the forest.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # Check data
        X = check_array(X, dtype=DTYPE, accept_sparse="csr")

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self.n_estimators,
                                                        self.n_jobs)

        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_helper)(e, 'predict_proba', X)
            for e in self.estimators_)

        # Reduce: average the per-tree probabilities in place.
        proba = all_proba[0]

        if self.n_outputs_ == 1:
            for j in range(1, len(all_proba)):
                proba += all_proba[j]

            proba /= len(self.estimators_)

        else:
            for j in range(1, len(all_proba)):
                for k in range(self.n_outputs_):
                    proba[k] += all_proba[j][k]

            for k in range(self.n_outputs_):
                proba[k] /= self.n_estimators

        return proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the trees in the
        forest.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
    """Base class for forest of trees-based regressors.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        super(ForestRegressor, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

    def predict(self, X):
        """Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        # Check data
        X = check_array(X, dtype=DTYPE, accept_sparse="csr")

        # Assign chunk of trees to jobs
        n_jobs, n_trees, starts = _partition_estimators(self.n_estimators,
                                                        self.n_jobs)

        # Parallel loop
        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_helper)(e, 'predict', X)
            for e in self.estimators_)

        # Reduce: average the per-tree predictions.
        y_hat = sum(all_y_hat) / len(self.estimators_)

        return y_hat

    def _set_oob_score(self, X, y):
        """Compute out-of-bag scores"""
        n_samples = y.shape[0]

        # Sum of out-of-bag predictions and the number of trees that
        # contributed to each sample, accumulated per output.
        predictions = np.zeros((n_samples, self.n_outputs_))
        n_predictions = np.zeros((n_samples, self.n_outputs_))

        sample_indices = np.arange(n_samples)
        for estimator in self.estimators_:
            # Invert the in-bag marker set during fitting to find the
            # out-of-bag samples for this tree.
            mask = np.ones(n_samples, dtype=np.bool)
            mask[estimator.indices_] = False
            mask_indices = sample_indices[mask]
            p_estimator = estimator.predict(X[mask_indices, :])

            if self.n_outputs_ == 1:
                p_estimator = p_estimator[:, np.newaxis]

            predictions[mask_indices, :] += p_estimator
            n_predictions[mask_indices, :] += 1

        if (n_predictions == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few trees were used "
                 "to compute any reliable oob estimates.")
            # Avoid division by zero for samples never left out.
            n_predictions[n_predictions == 0] = 1

        predictions /= n_predictions
        self.oob_prediction_ = predictions

        if self.n_outputs_ == 1:
            self.oob_prediction_ = \
                self.oob_prediction_.reshape((n_samples, ))

        self.oob_score_ = 0.0

        # Mean R^2 over all outputs.
        for k in range(self.n_outputs_):
            self.oob_score_ += r2_score(y[:, k],
                                        predictions[:, k])

        self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
    """A random forest classifier.

    A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and use averaging to
    improve the predictive accuracy and control over-fitting.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves.  A split is
        discarded if after the split, one of the leaves would contain less then
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.

    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.

    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).

    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).

    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.

    References
    ----------

    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    See also
    --------
    DecisionTreeClassifier, ExtraTreesClassifier
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # estimator_params lists the tree-specific attributes that are
        # forwarded to each DecisionTreeClassifier when it is constructed.
        super(RandomForestClassifier, self).__init__(
            base_estimator=DecisionTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
    """A random forest regressor.

    A random forest is a meta estimator that fits a number of decision
    trees on various sub-samples of the dataset and uses averaging
    to improve the predictive accuracy and control over-fitting.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.
        Note: this parameter is tree-specific.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.

    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.

    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    Attributes
    ----------
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_prediction_ : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training set.

    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    See also
    --------
    DecisionTreeRegressor, ExtraTreesRegressor
    """

    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Parameters named in estimator_params are forwarded verbatim to each
        # DecisionTreeRegressor sub-estimator.
        super(RandomForestRegressor, self).__init__(
            base_estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
        # Tree-specific hyper-parameters, stored under their public names.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
    """An extra-trees classifier.

    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
    and control over-fitting.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.

    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.

    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).

    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).

    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    See also
    --------
    sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
    RandomForestClassifier : Ensemble Classifier based on trees with optimal
        splits.
    """

    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Parameters named in estimator_params are forwarded verbatim to each
        # ExtraTreeClassifier sub-estimator.
        super(ExtraTreesClassifier, self).__init__(
            base_estimator=ExtraTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes", "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
        # Tree-specific hyper-parameters, stored under their public names.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
    """An extra-trees regressor.

    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
    and control over-fitting.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.
        Note: this parameter is tree-specific.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
        Note: this parameter is tree-specific.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
        Note: this parameter is tree-specific.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.

    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.
        Note: this parameter is tree-specific.

    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    Attributes
    ----------
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_prediction_ : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training set.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    See also
    --------
    sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
    RandomForestRegressor: Ensemble regressor using trees with optimal splits.
    """

    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Parameters named in estimator_params are forwarded verbatim to each
        # ExtraTreeRegressor sub-estimator.
        super(ExtraTreesRegressor, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
        # Tree-specific hyper-parameters, stored under their public names.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
    """An ensemble of totally random trees.

    An unsupervised transformation of a dataset to a high-dimensional
    sparse representation. A datapoint is coded according to which leaf of
    each tree it is sorted into. Using a one-hot encoding of the leaves,
    this leads to a binary coding with as many ones as trees in the forest.

    The dimensionality of the resulting representation is approximately
    ``n_estimators * 2 ** max_depth``.

    Parameters
    ----------
    n_estimators : int
        Number of trees in the forest.

    max_depth : int
        The maximum depth of each tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
        Note: this parameter is tree-specific.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
        ``min_samples_leaf`` samples.
        Note: this parameter is tree-specific.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
        Note: this parameter is tree-specific.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
        Note: this parameter is tree-specific.

    sparse_output : bool, optional (default=True)
        Whether or not to return a sparse CSR matrix, as default behavior,
        or to return a dense array compatible with dense pipeline operators.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    .. [2] Moosmann, F. and Triggs, B. and Jurie, F.  "Fast discriminative
           visual codebooks using randomized clustering forests"
           NIPS 2007
    """

    def __init__(self,
                 n_estimators=10,
                 max_depth=5,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_leaf_nodes=None,
                 sparse_output=True,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        super(RandomTreesEmbedding, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=False,
            oob_score=False,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
        # criterion and max_features are fixed (not user-settable): each tree
        # considers a single feature per split, which makes it totally random.
        self.criterion = 'mse'
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = 1
        self.max_leaf_nodes = max_leaf_nodes
        self.sparse_output = sparse_output

    def _set_oob_score(self, X, y):
        # Unsupervised embedding has no meaningful out-of-bag score.
        raise NotImplementedError("OOB score not supported by tree embedding")

    def fit(self, X, y=None, sample_weight=None):
        """Fit estimator.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            The input samples. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csc_matrix`` for maximum efficiency.

        y : ignored
            Not used; present for API consistency (fit_transform replaces it
            with random targets).

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights forwarded to the underlying forest fit.

        Returns
        -------
        self : object
            Returns self.
        """
        self.fit_transform(X, y, sample_weight=sample_weight)
        return self

    def fit_transform(self, X, y=None, sample_weight=None):
        """Fit estimator and transform dataset.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Input data used to build forests. Use ``dtype=np.float32`` for
            maximum efficiency.

        y : ignored
            Not used; it is overwritten below with uniform random targets.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights forwarded to the underlying forest fit.

        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # ensure_2d=False because there are actually unit test checking we fail
        # for 1d.
        X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        # The trees are fit against random uniform targets, so the learned
        # splits do not depend on any supplied label.
        rnd = check_random_state(self.random_state)
        y = rnd.uniform(size=X.shape[0])
        super(RandomTreesEmbedding, self).fit(X, y,
                                              sample_weight=sample_weight)
        # One-hot encode the per-tree leaf indices returned by apply().
        self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
        return self.one_hot_encoder_.fit_transform(self.apply(X))

    def transform(self, X):
        """Transform dataset.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Input data to be transformed. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csr_matrix`` for maximum efficiency.

        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        return self.one_hot_encoder_.transform(self.apply(X))
|
hitszxp/scikit-learn
|
sklearn/ensemble/forest.py
|
Python
|
bsd-3-clause
| 55,028
|
[
"Brian"
] |
cd82c4d3acd26272158e745af9be834542ce89acc6e516925a3eeaec747e2545
|
# Copyright (c) 2007 The Regents of The University of Michigan
# Copyright (c) 2010 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import m5
from m5 import internal
from m5.internal.stats import schedStatEvent as schedEvent
from m5.objects import Root
from m5.util import attrdict, fatal
# Output channels registered via initText(); dump() writes to each of these.
outputList = []

def initText(filename, desc=True):
    # Create a text-format statistics output backed by `filename` and register
    # it so dump() will visit it.  `desc` is forwarded to the native initText
    # (presumably toggles per-stat descriptions — confirm in the C++ side).
    output = internal.stats.initText(filename, desc)
    outputList.append(output)
def initSimStats():
    # Initialize simulator-wide statistics and register the python-side
    # handlers with the native stats framework.
    internal.stats.initSimStats()
    internal.stats.registerPythonStatsHandlers()

# NOTE(review): `names` is never referenced in this module's visible code —
# confirm external use before removing.
names = []
# Lookup structures populated by enable():
stats_dict = {}      # stat name -> stat object
stats_list = []      # all known stats (sorted by name in enable())
raw_stats_list = []  # stats in original registration order (never sorted)
def enable():
    '''Enable the statistics package.  Before the statistics package is
    enabled, all statistics must be created and initialized and once
    the package is enabled, no more statistics can be created.'''
    # Collect the generated 'dynamic_*' downcast helpers; each converts a
    # generic stat handle into a concrete python stat type (or returns None).
    __dynamic_cast = []
    for k, v in internal.stats.__dict__.iteritems():
        if k.startswith('dynamic_'):
            __dynamic_cast.append(v)

    for stat in internal.stats.statsList():
        for cast in __dynamic_cast:
            val = cast(stat)
            if val is not None:
                stats_list.append(val)
                raw_stats_list.append(val)
                break
        else:
            # No cast succeeded: the native side handed us a stat type the
            # python bindings do not know about.
            fatal("unknown stat type %s", stat)

    for stat in stats_list:
        if not stat.check() or not stat.baseCheck():
            fatal("statistic '%s' (%d) was not properly initialized " \
                  "by a regStats() function\n", stat.name, stat.id)

        # Hide non-display stats behind unique placeholder names.
        if not (stat.flags & flags.display):
            stat.name = "__Stat%06d" % stat.id

    # BUG FIX: the previous comparison function returned a bool (v1 < v2),
    # but list.sort(cmp) expects negative/zero/positive, so the list was
    # never correctly ordered.  Sorting with a key orders stats properly by
    # their dotted name components.
    stats_list.sort(key=lambda stat: stat.name.split('.'))

    for stat in stats_list:
        stats_dict[stat.name] = stat
        stat.enable()

    internal.stats.enable()
def prepare():
    '''Bring every registered statistic into a consistent state for data
    access; required before dumping or serializing them.'''
    for statistic in stats_list:
        statistic.prepare()
# Tick at which dump() last produced output; dump() refuses to run twice in
# the same tick.
lastDump = 0
def dump():
    '''Write all statistics data to every registered output channel.'''
    global lastDump
    now = m5.curTick()
    assert lastDump <= now
    # Dumping twice within the same tick would duplicate output; bail out.
    if now == lastDump:
        return
    lastDump = now

    internal.stats.processDumpQueue()
    prepare()

    for channel in outputList:
        if not channel.valid():
            continue
        channel.begin()
        for statistic in stats_list:
            channel.visit(statistic)
        channel.end()
def reset():
    '''Return all statistics to their base state.'''
    # First ask every SimObject in the hierarchy to reset its own stats.
    root = Root.getInstance()
    if root:
        for sim_object in root.descendants():
            sim_object.resetStats()

    # Then reset the python-side stats and run any queued reset callbacks.
    for statistic in stats_list:
        statistic.reset()
    internal.stats.processResetQueue()
# Per-statistic flag bits.  `display` is the only bit this module tests
# directly (enable() renames stats without it to hidden placeholder names);
# the remaining values presumably mirror the native Stats flags — verify
# against the C++ statistics code before relying on their meanings.
flags = attrdict({
    'none'    : 0x0000,
    'init'    : 0x0001,
    'display' : 0x0002,
    'total'   : 0x0010,
    'pdf'     : 0x0020,
    'cdf'     : 0x0040,
    'dist'    : 0x0080,
    'nozero'  : 0x0100,
    'nonan'   : 0x0200,
})
|
austinharris/gem5-riscv
|
src/python/m5/stats/__init__.py
|
Python
|
bsd-3-clause
| 4,584
|
[
"VisIt"
] |
8473f6730b39409281bbfab66df2d5e844c75ae8263ed61fb49ea2c85d6bae5e
|
# -*- encoding:utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
class DataCenter(models.Model):
    """
    A data center maps to one backend openstack cluster.

    The project/user/password configuration is used by the cloud-web API
    to create projects and users on that cluster.
    """
    id = models.AutoField(primary_key=True)
    name = models.CharField(_("Name"), max_length=255)
    # IP/hostname of the openstack endpoint; unique per data center.
    host = models.CharField(_(u"openstack host"), null=False, blank=False, max_length=255, unique=True, help_text=_(u"IP of Compute Center"))
    project = models.CharField(_(u"default project"), null=False, blank=False, max_length=255,
                               help_text=_(u"Project Name of Data Center,recommended: admin"))
    user = models.CharField(_(u"default project user"), null=False, blank=False, max_length=255,
                            help_text=_(u"User who can visit the project"))
    password = models.CharField(_(u"default user password"), null=False, blank=False, max_length=255)
    auth_url = models.CharField(_(u"usually http://host:5000/v2.0"), null=False, blank=False, max_length=255)
    ext_net = models.CharField(_(u"External Network Name"), null=False, blank=False, max_length=255, default="net04_ext")

    @classmethod
    def get_default(cls):
        """Return the data center with the lowest id, or None if none exist."""
        try:
            return cls.objects.filter().order_by('id')[0]
        except IndexError:
            # No DataCenter rows yet.  (BUG FIX: the previous bare ``except``
            # also silently swallowed database errors, hiding real
            # misconfiguration behind a None return.)
            return None

    def __unicode__(self):
        return self.name

    class Meta:
        db_table = "data_center"
        verbose_name = _("DataCenter")
        verbose_name_plural = _("DataCenter")
class UserDataCenter(models.Model):
    """
    Per-user membership in a data center, analogous to an openstack project.

    When a user registers in cloud-web, a project named
    "project-%(user-id)s" is created automatically; the keystone tenant and
    credentials for it are recorded here.
    """
    id = models.AutoField(primary_key=True)
    data_center = models.ForeignKey(DataCenter)
    user = models.ForeignKey('auth.User')
    # Keystone tenant (project) identity for this user on the backend cluster.
    tenant_name = models.CharField(_("Tenant"), max_length=255)
    tenant_uuid = models.CharField(_("Tenant UUID"), max_length=64)
    # Keystone credentials used to act on behalf of this user.
    keystone_user = models.CharField(_("User"), max_length=255)
    keystone_password = models.CharField(_("Password"), max_length=255)

    def __unicode__(self):
        return "%s-%s" % (self.data_center.name, self.user.username)

    class Meta:
        db_table = "user_data_center"
        verbose_name = _("UserDataCenter")
        verbose_name_plural = _("UserDataCenter")
|
eoncloud-dev/eonboard
|
eoncloud_web/biz/idc/models.py
|
Python
|
apache-2.0
| 2,462
|
[
"VisIt"
] |
e24473e8b608ec5decac561782836ceb49cc26443373787ffc0d3a7017c157d4
|
# -*- coding: utf-8 -*-
"""Make hetionet exports."""
import os
from random import choice
import click
import networkx as nx
import pandas as pd
from tqdm.autonotebook import tqdm
from pybel import (
from_nodelink_gz,
get_hetionet,
to_bel_script,
to_bel_script_gz,
to_graphdati_file,
to_graphdati_gz,
to_graphdati_jsonl_gz,
to_nodelink_gz,
)
from pybel.canonicalize import edge_to_bel
from pybel.struct.summary.edge_summary import get_metaedge_to_key
@click.command()
@click.option(
    "--directory",
    default=os.getcwd(),
    required=True,
    show_default=True,
    type=click.Path(dir_okay=True),
)
def main(directory: str):
    """Make hetionet exports."""
    # Load (or build and cache) the full hetionet graph as node-link JSON.
    path = os.path.join(directory, "hetionet.bel.nodelink.json.gz")
    if not os.path.exists(path):
        graph = get_hetionet()
        to_nodelink_gz(graph, path)
    else:
        click.echo("loading pickle from {}".format(path))
        graph = from_nodelink_gz(path)

    # Full exports in the various BEL serializations; each export is skipped
    # when the target file already exists.
    output_bel_gz_path = os.path.join(directory, "hetionet.bel.gz")
    if not os.path.exists(output_bel_gz_path):
        click.echo("outputting whole hetionet as BEL GZ to {}".format(output_bel_gz_path))
        to_bel_script_gz(graph, output_bel_gz_path, use_identifiers=True)

    output_graphdati_jsonl_gz_path = os.path.join(directory, "hetionet.bel.graphdati.jsonl.gz")
    if not os.path.exists(output_graphdati_jsonl_gz_path):
        click.echo("outputting whole hetionet as BEL GraphDati JSONL GZ to {}".format(output_graphdati_jsonl_gz_path))
        to_graphdati_jsonl_gz(graph, output_graphdati_jsonl_gz_path, use_identifiers=True)

    output_graphdati_gz_path = os.path.join(directory, "hetionet.bel.graphdati.json.gz")
    if not os.path.exists(output_graphdati_gz_path):
        click.echo("outputting whole hetionet as BEL GraphDati JSON GZ to {}".format(output_graphdati_gz_path))
        to_graphdati_gz(graph, output_graphdati_gz_path, use_identifiers=True)

    # Choose one random example edge per metaedge type.  BUG FIX: this
    # sampling used to live inside the ``if not os.path.exists(...)`` branch
    # below, so re-running with an existing summary TSV raised a NameError on
    # ``keep_keys`` in the comprehension further down.
    click.echo("getting metaedges")
    rows = []
    keep_keys = set()
    for value in get_metaedge_to_key(graph).values():
        u, v, key = choice(list(value))
        keep_keys.add(key)
        d = graph[u][v][key]
        bel = edge_to_bel(u, v, d, use_identifiers=True)
        rows.append((key[:8], bel))

    summary_tsv_path = os.path.join(directory, "hetionet_summary.tsv")
    if not os.path.exists(summary_tsv_path):
        df = pd.DataFrame(rows, columns=["key", "bel"])
        df.to_csv(summary_tsv_path, sep="\t", index=False)

    # Reduce the graph to just the sampled edges, dropping orphaned nodes,
    # and export the sample.
    non_sample_edges = [
        (u, v, k, d)
        for u, v, k, d in tqdm(
            graph.edges(keys=True, data=True),
            desc="Getting non-sample edges to remove",
        )
        if k not in keep_keys
    ]
    click.echo("Removing non-sample edges")
    graph.remove_edges_from(non_sample_edges)
    graph.remove_nodes_from(list(nx.isolates(graph)))

    sample_bel_path = os.path.join(directory, "hetionet_sample.bel")
    click.echo("outputting sample hetionet in BEL to {}".format(sample_bel_path))
    to_bel_script(graph, sample_bel_path, use_identifiers=True)

    sample_graphdati_path = os.path.join(directory, "hetionet_sample.bel.graphdati.json")
    # BUG FIX: this message previously echoed ``sample_bel_path`` (copy/paste
    # of the line above) even though the GraphDati JSON file is written here.
    click.echo("outputting sample hetionet as GraphDati JSON to {}".format(sample_graphdati_path))
    to_graphdati_file(graph, sample_graphdati_path, use_identifiers=True, indent=2)


if __name__ == "__main__":
    main()
|
pybel/pybel
|
src/pybel/io/hetionet/cli.py
|
Python
|
mit
| 3,495
|
[
"Pybel"
] |
a2f448564fde0d32c41d3c6115972baf5567777b59cd5ac3c2f03288393a0104
|
from base.twilltestcase import TwillTestCase
from functional import database_contexts
import galaxy.model
from base.test_db_util import (
get_user,
get_private_role,
get_all_histories_for_user,
get_latest_history_for_user,
get_default_history_permissions_by_history,
get_latest_dataset,
refresh,
flush,
get_group_by_name,
get_role_by_name,
get_user_group_associations_by_group,
get_default_history_permissions_by_role,
get_default_user_permissions_by_role,
get_user_role_associations_by_role,
get_group_role_associations_by_group,
get_dataset_permissions_by_role,
get_group_role_associations_by_role,
)
# Globals set up by these tests; each is populated by the test method that
# creates the corresponding account/role/group as the ordered suite runs.
regular_user1 = regular_user2 = regular_user3 = admin_user = None
role_one = role_two = role_three = None
group_zero = group_one = group_two = None
class TestDataSecurity( TwillTestCase ):
def test_000_initiate_users( self ):
"""Ensuring all required user accounts exist"""
self.logout()
self.login( email='test1@bx.psu.edu', username='regular-user1' )
global regular_user1
regular_user1 = get_user( 'test1@bx.psu.edu' )
assert regular_user1 is not None, 'Problem retrieving user with email "test1@bx.psu.edu" from the database'
self.logout()
self.login( email='test2@bx.psu.edu', username='regular-user2' )
global regular_user2
regular_user2 = get_user( 'test2@bx.psu.edu' )
assert regular_user2 is not None, 'Problem retrieving user with email "test2@bx.psu.edu" from the database'
self.logout()
self.login( email='test@bx.psu.edu', username='admin-user' )
global admin_user
admin_user = get_user( 'test@bx.psu.edu' )
assert admin_user is not None, 'Problem retrieving user with email "test@bx.psu.edu" from the database'
def test_005_create_new_user_account_as_admin( self ):
"""Testing creating a new user account as admin"""
# Logged in as admin_user
email = 'test3@bx.psu.edu'
password = 'testuser'
# Test setting the user name to one that is already taken. Note that the account must not exist in order
# for this test to work as desired, so the email we're passing is important...
previously_created, username_taken, invalid_username = self.create_new_account_as_admin( email='diff@you.com',
password=password,
username='admin-user',
redirect='' )
if not username_taken:
error_msg = "The public name (%s) is already being used by another user, but no error was displayed" % 'admin-user'
raise AssertionError( error_msg )
# Test setting the user name to an invalid one. Note that the account must not exist in order
# for this test to work as desired, so the email we're passing is important...
previously_created, username_taken, invalid_username = self.create_new_account_as_admin( email='diff@you.com',
password=password,
username='h',
redirect='' )
if not invalid_username:
raise AssertionError( "The public name (%s) is is invalid, but no error was displayed" % 'diff@you.com' )
previously_created, username_taken, invalid_username = self.create_new_account_as_admin( email=email,
password=password,
username='regular-user3',
redirect='' )
# Get the user object for later tests
global regular_user3
regular_user3 = get_user( email )
assert regular_user3 is not None, 'Problem retrieving user with email "%s" from the database' % email
global regular_user3_private_role
regular_user3_private_role = get_private_role( regular_user3 )
# Make sure DefaultUserPermissions were created
if not regular_user3.default_permissions:
raise AssertionError( 'No DefaultUserPermissions were created for user %s when the admin created the account' % email )
# Make sure a private role was created for the user
if not regular_user3.roles:
raise AssertionError( 'No UserRoleAssociations were created for user %s when the admin created the account' % email )
if not previously_created and len( regular_user3.roles ) != 1:
raise AssertionError( '%d UserRoleAssociations were created for user %s when the admin created the account ( should have been 1 )' \
% ( len( regular_user3.roles ), regular_user3.email ) )
for ura in regular_user3.roles:
role = database_contexts.galaxy_context.query( galaxy.model.Role ).get( ura.role_id )
if not previously_created and role.type != 'private':
raise AssertionError( 'Role created for user %s when the admin created the account is not private, type is' \
% str( role.type ) )
if not previously_created:
# Make sure a history was not created ( previous test runs may have left deleted histories )
histories = get_all_histories_for_user( regular_user3 )
if histories:
raise AssertionError( 'Histories were incorrectly created for user %s when the admin created the account' % email )
# Make sure the user was not associated with any groups
if regular_user3.groups:
raise AssertionError( 'Groups were incorrectly associated with user %s when the admin created the account' % email )
def test_010_reset_password_as_admin( self ):
"""Testing reseting a user password as admin"""
self.reset_password_as_admin( user_id=self.security.encode_id( regular_user3.id ), password='testreset' )
def test_015_login_after_password_reset( self ):
"""Testing logging in after an admin reset a password - tests DefaultHistoryPermissions for accounts created by an admin"""
# logged in as admin_user
self.logout()
self.login( email=regular_user3.email, password='testreset' )
# Make sure a History and HistoryDefaultPermissions exist for the user
latest_history = get_latest_history_for_user( regular_user3 )
if not latest_history.user_id == regular_user3.id:
raise AssertionError( 'A history was not created for user %s when he logged in' % regular_user3.email )
if not latest_history.default_permissions:
raise AssertionError( 'No DefaultHistoryPermissions were created for history id %d when it was created' % latest_history.id )
dhps = get_default_history_permissions_by_history( latest_history )
if len( dhps ) > 1:
raise AssertionError( 'More than 1 DefaultHistoryPermissions were created for history id %d when it was created' % latest_history.id )
dhp = dhps[0]
if not dhp.action == galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
raise AssertionError( 'The DefaultHistoryPermission.action for history id %d is "%s", but it should be "manage permissions"' \
% ( latest_history.id, dhp.action ) )
# Upload a file to create a HistoryDatasetAssociation
self.upload_file( '1.bed' )
latest_dataset = get_latest_dataset()
for dp in latest_dataset.actions:
# Should only have 1 DatasetPermissions
if dp.action != galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action:
raise AssertionError( 'The DatasetPermissions for dataset id %d is %s ( should have been %s )' \
% ( latest_dataset.id,
latest_dataset.actions.action,
galaxy.model.Dataset.permitted_actions.DATASET_MANAGE_PERMISSIONS.action ) )
self.logout()
# Reset the password to the default for later tests
self.login( email='test@bx.psu.edu' )
self.reset_password_as_admin( user_id=self.security.encode_id( regular_user3.id ), password='testuser' )
def test_020_mark_user_deleted( self ):
"""Testing marking a user account as deleted"""
# Logged in as admin_user
self.mark_user_deleted( user_id=self.security.encode_id( regular_user3.id ), email=regular_user3.email )
if not regular_user3.active_histories:
raise AssertionError( 'HistoryDatasetAssociations for regular_user3 were incorrectly deleted when the user was marked deleted' )
def test_025_undelete_user( self ):
"""Testing undeleting a user account"""
# Logged in as admin_user
self.undelete_user( user_id=self.security.encode_id( regular_user3.id ), email=regular_user3.email )
def test_030_create_role( self ):
"""Testing creating new role with 3 members ( and a new group named the same ), then renaming the role"""
# Logged in as admin_user
name = 'Role One'
description = "This is Role Ones description"
in_user_ids = [ str( admin_user.id ), str( regular_user1.id ), str( regular_user3.id ) ]
in_group_ids = []
# Add 1 to the number of associated groups since we are creating a new one with the same name as the role
num_gras = len( in_group_ids ) + 1
self.create_role( name=name,
description=description,
in_user_ids=in_user_ids,
in_group_ids=in_group_ids,
create_group_for_role='yes',
private_role=admin_user.email,
strings_displayed=[ "Role '%s' has been created with %d associated users and %d associated groups." % ( name, len( in_user_ids ), num_gras ),
"One of the groups associated with this role is the newly created group with the same name." ] )
# Get the role object for later tests
global role_one
role_one = database_contexts.galaxy_context.query( galaxy.model.Role ).filter( galaxy.model.Role.table.c.name == name ).first()
assert role_one is not None, 'Problem retrieving role named "Role One" from the database'
# Make sure UserRoleAssociations are correct
if len( role_one.users ) != len( in_user_ids ):
raise AssertionError( '%d UserRoleAssociations were created for role id %d when it was created ( should have been %d )' \
% ( len( role_one.users ), role_one.id, len( in_user_ids ) ) )
# Each of the following users should now have 2 role associations, their private role and role_one
for user in [ admin_user, regular_user1, regular_user3 ]:
refresh( user )
if len( user.roles ) != 2:
raise AssertionError( '%d UserRoleAssociations are associated with user %s ( should be 2 )' \
% ( len( user.roles ), user.email ) )
# Make sure the group was created
self.visit_url( '%s/admin/groups' % self.url )
self.check_page_for_string( name )
global group_zero
group_zero = get_group_by_name( name )
# Rename the role
rename = "Role One's been Renamed"
new_description = "This is Role One's Re-described"
self.rename_role( self.security.encode_id( role_one.id ), name=rename, description=new_description )
self.visit_url( '%s/admin/roles' % self.url )
self.check_page_for_string( rename )
self.check_page_for_string( new_description )
# Reset the role back to the original name and description
self.rename_role( self.security.encode_id( role_one.id ), name=name, description=description )
def test_035_create_group( self ):
"""Testing creating new group with 3 members and 2 associated roles, then renaming it"""
# Logged in as admin_user
name = "Group One's Name"
in_user_ids = [ str( admin_user.id ), str( regular_user1.id ), str( regular_user3.id ) ]
in_role_ids = [ str( role_one.id ) ]
# The number of GroupRoleAssociations should be 2, role_one and the newly created role named 'Group One's Name'
num_gras = len( in_role_ids ) + 1
self.create_group( name=name,
in_user_ids=in_user_ids,
in_role_ids=in_role_ids,
create_role_for_group=True,
strings_displayed=[ "Group '%s' has been created with %d associated users and %d associated roles." % ( name, len( in_user_ids ), num_gras ),
"One of the roles associated with this group is the newly created role with the same name." ] )
# Get the group object for later tests
global group_one
group_one = get_group_by_name( name )
assert group_one is not None, 'Problem retrieving group named "Group One" from the database'
# Make sure UserGroupAssociations are correct
if len( group_one.users ) != len( in_user_ids ):
raise AssertionError( '%d UserGroupAssociations were created for group id %d when it was created ( should have been %d )' \
% ( len( group_one.users ), group_one.id, len( in_user_ids ) ) )
# Each user should now have 1 group association, group_one
for user in [ admin_user, regular_user1, regular_user3 ]:
refresh( user )
if len( user.groups ) != 1:
raise AssertionError( '%d UserGroupAssociations are associated with user %s ( should be 1 )' % ( len( user.groups ), user.email ) )
# Make sure GroupRoleAssociations are correct
if len( group_one.roles ) != num_gras:
raise AssertionError( '%d GroupRoleAssociations were created for group id %d when it was created ( should have been %d )' \
% ( len( group_one.roles ), group_one.id, num_gras ) )
# Rename the group
rename = "Group One's been Renamed"
self.rename_group( self.security.encode_id( group_one.id ), name=rename, )
self.visit_url( '%s/admin/groups' % self.url )
self.check_page_for_string( rename )
# Reset the group back to the original name
self.rename_group( self.security.encode_id( group_one.id ), name=name )
def test_040_add_members_and_role_to_group( self ):
"""Testing editing user membership and role associations of an existing group"""
# Logged in as admin_user
name = 'Group Two'
self.create_group( name=name, in_user_ids=[], in_role_ids=[] )
# Get the group object for later tests
global group_two
group_two = get_group_by_name( name )
assert group_two is not None, 'Problem retrieving group named "Group Two" from the database'
# group_two should have no associations
if group_two.users:
raise AssertionError( '%d UserGroupAssociations were created for group id %d when it was created ( should have been 0 )' \
% ( len( group_two.users ), group_two.id ) )
if group_two.roles:
raise AssertionError( '%d GroupRoleAssociations were created for group id %d when it was created ( should have been 0 )' \
% ( len( group_two.roles ), group_two.id ) )
user_ids = [ str( regular_user1.id ) ]
role_ids = [ str( role_one.id ) ]
self.associate_users_and_roles_with_group( self.security.encode_id( group_two.id ),
group_two.name,
user_ids=user_ids,
role_ids=role_ids )
def test_045_create_role_with_user_and_group_associations( self ):
"""Testing creating a role with user and group associations"""
# Logged in as admin_user
# NOTE: To get this to work with twill, all select lists on the ~/admin/role page must contain at least
# 1 option value or twill throws an exception, which is: ParseError: OPTION outside of SELECT
# Due to this bug in twill, we create the role, we bypass the page and visit the URL in the
# associate_users_and_groups_with_role() method.
name = 'Role Two'
description = 'This is Role Two'
user_ids = [ str( admin_user.id ) ]
group_ids = [ str( group_two.id ) ]
private_role = admin_user.email
# Create the role
self.create_role( name=name,
description=description,
in_user_ids=user_ids,
in_group_ids=group_ids,
private_role=private_role )
# Get the role object for later tests
global role_two
role_two = get_role_by_name( name )
assert role_two is not None, 'Problem retrieving role named "Role Two" from the database'
# Make sure UserRoleAssociations are correct
if len( role_two.users ) != len( user_ids ):
raise AssertionError( '%d UserRoleAssociations were created for role id %d when it was created with %d members' \
% ( len( role_two.users ), role_two.id, len( user_ids ) ) )
# admin_user should now have 3 role associations, private role, role_one, role_two
refresh( admin_user )
if len( admin_user.roles ) != 3:
raise AssertionError( '%d UserRoleAssociations are associated with user %s ( should be 3 )' % ( len( admin_user.roles ), admin_user.email ) )
# Make sure GroupRoleAssociations are correct
refresh( role_two )
if len( role_two.groups ) != len( group_ids ):
raise AssertionError( '%d GroupRoleAssociations were created for role id %d when it was created ( should have been %d )' \
% ( len( role_two.groups ), role_two.id, len( group_ids ) ) )
# group_two should now be associated with 2 roles: role_one, role_two
refresh( group_two )
if len( group_two.roles ) != 2:
raise AssertionError( '%d GroupRoleAssociations are associated with group id %d ( should be 2 )' % ( len( group_two.roles ), group_two.id ) )
def test_050_change_user_role_associations( self ):
"""Testing changing roles associated with a user"""
# Logged in as admin_user
# Create a new role with no associations
name = 'Role Three'
description = 'This is Role Three'
user_ids = []
group_ids = []
private_role = admin_user.email
self.create_role( name=name,
description=description,
in_user_ids=user_ids,
in_group_ids=group_ids,
private_role=private_role )
# Get the role object for later tests
global role_three
role_three = get_role_by_name( name )
assert role_three is not None, 'Problem retrieving role named "Role Three" from the database'
# Associate the role with a user
refresh( admin_user )
role_ids = []
for ura in admin_user.non_private_roles:
role_ids.append( str( ura.role_id ) )
role_ids.append( str( role_three.id ) )
group_ids = []
for uga in admin_user.groups:
group_ids.append( str( uga.group_id ) )
strings_displayed = [ "User '%s' has been updated with %d associated roles and %d associated groups" % \
( admin_user.email, len( role_ids ), len( group_ids ) ) ]
self.manage_roles_and_groups_for_user( self.security.encode_id( admin_user.id ),
in_role_ids=role_ids,
in_group_ids=group_ids,
strings_displayed=strings_displayed )
refresh( admin_user )
# admin_user should now be associated with 4 roles: private, role_one, role_two, role_three
if len( admin_user.roles ) != 4:
raise AssertionError( '%d UserRoleAssociations are associated with %s ( should be 4 )' % \
( len( admin_user.roles ), admin_user.email ) )
def test_055_mark_group_deleted( self ):
"""Testing marking a group as deleted"""
# Logged in as admin_user
self.browse_groups( strings_displayed=[ group_two.name ] )
self.mark_group_deleted( self.security.encode_id( group_two.id ), group_two.name )
refresh( group_two )
if not group_two.deleted:
raise AssertionError( '%s was not correctly marked as deleted.' % group_two.name )
# Deleting a group should not delete any associations
if not group_two.members:
raise AssertionError( '%s incorrectly lost all members when it was marked as deleted.' % group_two.name )
if not group_two.roles:
raise AssertionError( '%s incorrectly lost all role associations when it was marked as deleted.' % group_two.name )
def test_060_undelete_group( self ):
"""Testing undeleting a deleted group"""
# Logged in as admin_user
self.undelete_group( self.security.encode_id( group_two.id ), group_two.name )
refresh( group_two )
if group_two.deleted:
raise AssertionError( '%s was not correctly marked as not deleted.' % group_two.name )
def test_065_mark_role_deleted( self ):
"""Testing marking a role as deleted"""
# Logged in as admin_user
self.browse_roles( strings_displayed=[ role_two.name ] )
self.mark_role_deleted( self.security.encode_id( role_two.id ), role_two.name )
refresh( role_two )
if not role_two.deleted:
raise AssertionError( '%s was not correctly marked as deleted.' % role_two.name )
# Deleting a role should not delete any associations
if not role_two.users:
raise AssertionError( '%s incorrectly lost all user associations when it was marked as deleted.' % role_two.name )
if not role_two.groups:
raise AssertionError( '%s incorrectly lost all group associations when it was marked as deleted.' % role_two.name )
def test_070_undelete_role( self ):
"""Testing undeleting a deleted role"""
# Logged in as admin_user
self.undelete_role( self.security.encode_id( role_two.id ), role_two.name )
def test_075_purge_user( self ):
"""Testing purging a user account"""
# Logged in as admin_user
self.mark_user_deleted( user_id=self.security.encode_id( regular_user3.id ), email=regular_user3.email )
refresh( regular_user3 )
self.purge_user( self.security.encode_id( regular_user3.id ), regular_user3.email )
refresh( regular_user3 )
if not regular_user3.purged:
raise AssertionError( 'User %s was not marked as purged.' % regular_user3.email )
# Make sure DefaultUserPermissions deleted EXCEPT FOR THE PRIVATE ROLE
if len( regular_user3.default_permissions ) != 1:
raise AssertionError( 'DefaultUserPermissions for user %s were not deleted.' % regular_user3.email )
for dup in regular_user3.default_permissions:
role = database_contexts.galaxy_context.query( galaxy.model.Role ).get( dup.role_id )
if role.type != 'private':
raise AssertionError( 'DefaultUserPermissions for user %s are not related with the private role.' % regular_user3.email )
# Make sure History deleted
for history in regular_user3.histories:
refresh( history )
if not history.deleted:
raise AssertionError( 'User %s has active history id %d after their account was marked as purged.' % ( regular_user3.email, history.id ) )
# NOTE: Not all hdas / datasets will be deleted at the time a history is deleted - the cleanup_datasets.py script
# is responsible for this.
# Make sure UserGroupAssociations deleted
if regular_user3.groups:
raise AssertionError( 'User %s has active group after their account was marked as purged.' % ( regular_user3.email ) )
# Make sure UserRoleAssociations deleted EXCEPT FOR THE PRIVATE ROLE
if len( regular_user3.roles ) != 1:
raise AssertionError( 'UserRoleAssociations for user %s were not deleted.' % regular_user3.email )
for ura in regular_user3.roles:
role = database_contexts.galaxy_context.query( galaxy.model.Role ).get( ura.role_id )
if role.type != 'private':
raise AssertionError( 'UserRoleAssociations for user %s are not related with the private role.' % regular_user3.email )
def test_080_manually_unpurge_user( self ):
"""Testing manually un-purging a user account"""
# Logged in as admin_user
# Reset the user for later test runs. The user's private Role and DefaultUserPermissions for that role
# should have been preserved, so all we need to do is reset purged and deleted.
# TODO: If we decide to implement the GUI feature for un-purging a user, replace this with a method call
regular_user3.purged = False
regular_user3.deleted = False
flush( regular_user3 )
def test_085_purge_group( self ):
"""Testing purging a group"""
# Logged in as admin_user
self.mark_group_deleted( self.security.encode_id( group_two.id ), group_two.name )
self.purge_group( self.security.encode_id( group_two.id ), group_two.name )
# Make sure there are no UserGroupAssociations
if get_user_group_associations_by_group( group_two ):
raise AssertionError( "Purging the group did not delete the UserGroupAssociations for group_id '%s'" % group_two.id )
# Make sure there are no GroupRoleAssociations
if get_group_role_associations_by_group( group_two ):
raise AssertionError( "Purging the group did not delete the GroupRoleAssociations for group_id '%s'" % group_two.id )
# Undelete the group for later test runs
self.undelete_group( self.security.encode_id( group_two.id ), group_two.name )
def test_090_purge_role( self ):
"""Testing purging a role"""
# Logged in as admin_user
self.mark_role_deleted( self.security.encode_id( role_two.id ), role_two.name )
self.purge_role( self.security.encode_id( role_two.id ), role_two.name )
# Make sure there are no UserRoleAssociations
if get_user_role_associations_by_role( role_two ):
raise AssertionError( "Purging the role did not delete the UserRoleAssociations for role_id '%s'" % role_two.id )
# Make sure there are no DefaultUserPermissions associated with the Role
if get_default_user_permissions_by_role( role_two ):
raise AssertionError( "Purging the role did not delete the DefaultUserPermissions for role_id '%s'" % role_two.id )
# Make sure there are no DefaultHistoryPermissions associated with the Role
if get_default_history_permissions_by_role( role_two ):
raise AssertionError( "Purging the role did not delete the DefaultHistoryPermissions for role_id '%s'" % role_two.id )
# Make sure there are no GroupRoleAssociations
if get_group_role_associations_by_role( role_two ):
raise AssertionError( "Purging the role did not delete the GroupRoleAssociations for role_id '%s'" % role_two.id )
# Make sure there are no DatasetPermissionss
if get_dataset_permissions_by_role( role_two ):
raise AssertionError( "Purging the role did not delete the DatasetPermissionss for role_id '%s'" % role_two.id )
def test_095_manually_unpurge_role( self ):
"""Testing manually un-purging a role"""
# Logged in as admin_user
# Manually unpurge, then undelete the role for later test runs
# TODO: If we decide to implement the GUI feature for un-purging a role, replace this with a method call
role_two.purged = False
flush( role_two )
self.undelete_role( self.security.encode_id( role_two.id ), role_two.name )
def test_999_reset_data_for_later_test_runs( self ):
"""Reseting data to enable later test runs to pass"""
# Logged in as admin_user
##################
# Eliminate all non-private roles
##################
for role in [ role_one, role_two, role_three ]:
self.mark_role_deleted( self.security.encode_id( role.id ), role.name )
self.purge_role( self.security.encode_id( role.id ), role.name )
# Manually delete the role from the database
refresh( role )
database_contexts.galaxy_context.delete( role )
database_contexts.galaxy_context.flush()
##################
# Eliminate all groups
##################
for group in [ group_zero, group_one, group_two ]:
self.mark_group_deleted( self.security.encode_id( group.id ), group.name )
self.purge_group( self.security.encode_id( group.id ), group.name )
# Manually delete the group from the database
refresh( group )
database_contexts.galaxy_context.delete( group )
database_contexts.galaxy_context.flush()
##################
# Make sure all users are associated only with their private roles
##################
for user in [ admin_user, regular_user1, regular_user2, regular_user3 ]:
refresh( user )
if len( user.roles) != 1:
raise AssertionError( '%d UserRoleAssociations are associated with %s ( should be 1 )' % ( len( user.roles ), user.email ) )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/test/functional/test_admin_features.py
|
Python
|
gpl-3.0
| 30,801
|
[
"Galaxy",
"VisIt"
] |
c2ad50ffbe5230a4b95a5967098a67263cfee42c77866c1b72441f18b41ce31d
|
#!/usr/bin/env python
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from __future__ import print_function
usage = """usage: BuildHeaderTest.py <module_name> <module_source_path> <module_binary_path> <maximum_number_of_headers>
This script generates a a source file designed to check the headers in each
module. The generated HeaderTest can be found in the module binary 'test'
directory in a file itk<module_name>HeaderTest#.cxx. This contains a null
main(), but includes all the classes in the module. The primary purpose of this
test is to make sure there are not missing module dependencies. It also tests
for syntax and missing #include's.
"""
# Headers to not test because of dependecy issues, etc.
BANNED_HEADERS = set(('itkExceptionObject.h', # There is a pre-processor check so people use itkMacro.h instead.
'itkFFTWForwardFFTImageFilter.h',
'itkFFTWInverseFFTImageFilter.h',
'itkFFTWRealToHalfHermitianForwardFFTImageFilter.h',
'itkFFTWHalfHermitianToRealInverseFFTImageFilter.h',
'itkFFTWComplexToComplexFFTImageFilter.h',
'itkFFTWCommon.h',
'itkPyBuffer.h', # needs Python.h, etc
'itkVanHerkGilWermanErodeDilateImageFilter.h', # circular include's
'itkBSplineDeformableTransform.h', # deprecated
'vtkCaptureScreen.h', # these includes require VTK
'itkBSplineDeformableTransformInitializer.h'))
HEADER = """/*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
// This file has been generated by BuildHeaderTest.py
// To regenerate, build the ITKHeaderTests target.
// This is a test to include each header file for Insight.
"""
TRAILER = """
#include <cstdlib> // needed for EXIT_SUCCESS macro
int main ( int , char* [] )
{
return EXIT_SUCCESS;
}
"""
import glob
import os
import sys
if len(sys.argv) < 6:
print(usage)
sys.exit(1)
def main():
module_name = sys.argv[1]
module_source_path = sys.argv[2]
module_binary_path = sys.argv[3]
maximum_number_of_headers = int(sys.argv[4])
test_num = int(sys.argv[5])
# Get all the header files.
include_dir = os.path.join(module_source_path, 'include')
h_files = glob.glob(os.path.join(include_dir, '*.h'))
h_files = [os.path.basename(h) for h in h_files]
added_header_idx = maximum_number_of_headers * (test_num - 1)
test_source_path = os.path.join(module_binary_path, 'test')
if not os.path.exists(test_source_path):
os.makedirs(test_source_path)
test_source_file = os.path.join(test_source_path,
str(module_name) + 'HeaderTest' + str(test_num) + '.cxx')
test_src = open(test_source_file, 'w')
try:
test_src.write(HEADER)
if added_header_idx + maximum_number_of_headers > len(h_files):
max_idx = added_header_idx + len(h_files) % maximum_number_of_headers
else:
max_idx = added_header_idx + maximum_number_of_headers
for i in range(added_header_idx, max_idx):
# Use the .hxx if possible.
hxx_file = h_files[i][:-1] + 'hxx'
# Files that include VTK headers need to link to VTK.
if h_files[i] in BANNED_HEADERS or h_files[i].lower().find('vtk') != -1:
to_include = '// #include "' + h_files[i] + '" // Banned in BuildHeaderTest.py\n'
elif os.path.exists(os.path.join(module_source_path, 'include',
hxx_file)):
to_include = '#include "' + hxx_file + '"\n'
else:
to_include = '#include "' + h_files[i] + '"\n'
test_src.write(to_include)
test_src.write(TRAILER)
finally:
test_src.close()
return 0
if __name__ == "__main__":
ret = main()
sys.exit(ret)
|
atsnyder/ITK
|
Utilities/Maintenance/BuildHeaderTest.py
|
Python
|
apache-2.0
| 5,160
|
[
"VTK"
] |
7d15c04094790aaed0d1dde6adac0634ca6b88270c08b533b9f4388fdc3a8a7e
|
import argparse, os, sys, csv, heapq, string
from automaton import *
from bs4 import BeautifulSoup
done = set()
parser = argparse.ArgumentParser(description='Crawl Google Auto-Complete Query')
parser.add_argument('filename')
parser.add_argument('--query', action='append')
parser.add_argument('--prefix', action='append')
parser.add_argument('--include', action='append')
parser.add_argument('--exclude', action='append')
args = parser.parse_args()
if args.query:
queries = list(args.query)
else:
queries = [letter for letter in string.lowercase]
if os.path.exists(args.filename):
with open(args.filename, 'rb') as fptr:
for line in csv.reader(fptr):
query = line[0]
result = line[1]
done.add(query)
heapq.heappush(queries, result[:(len(query) + 1)])
print 'Crawled:', len(done)
comm = connect()
comm.setup()
comm.visit('http://www.google.com')
comm.wait()
lines = ['var el = document.getElementById("gbqfq");',
'el.focus();',
'var evt = document.createEvent("KeyboardEvent");',
'evt.initKeyboardEvent ("keypress", true, true, window, 0, 0, 0, 0, 0, "a".charCodeAt(0));']
for line in lines:
comm.Runtime.evaluate(expression=line)
comm.wait()
with open(args.filename, 'ab') as fptr:
data = csv.writer(fptr)
while queries:
query = heapq.heappop(queries)
if query in done:
continue
if args.prefix:
if not any(query.startswith(prefix) for prefix in args.prefix):
continue
if args.include:
if not any(inc in query for inc in args.include):
continue
if args.exclude:
if any(exc in query for exc in args.exclude):
continue
comm.Runtime.evaluate(expression='el.value = "{0}";'.format(query))
comm.Runtime.evaluate(expression='el.dispatchEvent(evt);')
soup = BeautifulSoup(comm.html())
try:
# TODO: This will fail on:
# "adding google calendar to microsoft o..."
# Google changes the html structure for long queries.
options = [el.find('td').text for el in soup.findAll(attrs={'class': 'gssb_a gbqfsf'})]
done.add(query)
print query
for option in options:
data.writerow([query, option])
for option in options:
heapq.heappush(queries, option[:(len(query) + 1)])
except:
pass
|
conceptcreative/free_grants_community
|
autocomplete/crawl_google_autocomplete.py
|
Python
|
mit
| 2,542
|
[
"VisIt"
] |
b8c2cb3f38f6e32657cfef7327f0f719b8418e526ab4cf4fb2421061db334a36
|
from __future__ import with_statement, print_function
__doc__ = \
"""
This module defines an ASE interface to ParaGauss.
"""
import os, sys
from os.path import basename
from os.path import isfile
import numpy as np2
from numpy import array, any
from ase.gxfile import gxread, gxwrite
from ase.units import Bohr, Hartree
import shlex, subprocess
from general import Calculator
def print_error (*args):
print (*args, file=sys.stderr)
class ParaGauss:
    """
    Class for doing ParaGauss calculations.

    ParaGauss needs at least one input file, which gives to it all the
    needed parameters.  Communication of geometries, energies and
    gradients with the rest of the system goes through a "gxfile".
    """
    def __init__(self,
                 input = "input",
                 cmdline = "runpg /users/alexei/exe/openmpi/mainscf_V3.1.4b7-64",
                 silence = True,
                 optimizer = None,
                 copy_input = "always"
                 ):
        """
        Parameters
        ==========
        |input|
            name of the input file which contains all the informations
            ParaGauss needs
        |cmdline|
            Shell command to start ParaGauss, it will be executed in
            working directory. A typical command line reads:
            runpg /users/alexei/exe/openmpi/mainscf_V3.1.4
        |silence|
            if True (is as default) ParaGauss stdout will go to a
            separate file if False it would go to the normal stdout
        |optimizer|
            If optimizer input is needed for a ParaGauss single point
            calculation the programm takes the content from optimizer
            and provides it as optimizer.input in the directory the
            calculation runs
        |copy_input|
            Allows three different modes:
            always
                (is the default) will create new input file from
                storage each time a quantum chemistry calculation
                starts
            never
                will never create an input file
            inexistent
                will create a new input file for a quantum chemistry
                calculation if it finds that the file does not exist
            Both always and inexistent will fetch the input file
            they will create lateron in the current working
            directory during initalization
        """
        self.input = input
        # Command line is stored internally as list of arguments
        # (isinstance instead of the fragile type(...) == type("")):
        if isinstance (cmdline, str):
            cmdline = shlex.split (cmdline)
        self.cmdline = cmdline
        self.silence = silence
        assert (copy_input in ["always", "never", "inexistent"])
        self.copy_input = copy_input
        self.converged = False
        # I am getting tired of this voodoo, FIXME: factor this out
        # into a function:
        if input.startswith("i."):
            # e.g. i.h2o:
            input_base_name = input[2:]
        elif input.endswith(".scm") or input.endswith(".nml"):
            input_base_name = input[:-4]
        else:
            # e.g. input, or anything else:
            input_base_name = input
        if input_base_name == "input":
            self.output = "output"
        else:
            self.output = "o." + input_base_name + "/output"
        # store metadata here, it might be needed (even in another
        # directory)
        self.data = {}
        if not self.copy_input == "never":
            with open (self.input, "r") as file:
                self.inputstring = file.read()
        if optimizer is None:
            self.optimizer = None
        else:
            with open (optimizer, "r") as file:
                self.optimizer = file.read()
        self.atnums = None
        # there may be a gxfile from gxoptimizer we must not disturb
        # its internal coordinates
        if os.path.exists('gxfile'):
            self.atnums, __, self.data["isyms"], self.data["inums"], self.data["iconns"], self.data["ivars"], \
                self.data["additional"], __, __, loop = gxread('gxfile')
        # We compare against None in self.report() and elsewhere:
        self.__energy = None

    def update(self, atoms):
        """
        Decides whether and how to calculate energy and forces: if the
        stored positions have not changed nothing is done; on start or
        change in the settings, an initialization will take place.
        """
        if (not self.converged or
            len(self.atnums) != len(atoms) or
            (self.atnums != atoms.get_atomic_numbers()).any()):
            self.initialize(atoms)
            self.calculate(atoms)
        elif ((self.positions != atoms.get_positions()).any()):
            self.calculate(atoms)

    def initialize(self, atoms):
        # Force a fresh calculation on the next update():
        self.converged = False

    def get_potential_energy(self, atoms, force_consistent=False):
        """
        Makes sure energy (and forces) are up to date and afterwards
        gives energy back (energy is energy calculated with ParaGauss
        in atomic units, transformed here to ASE units).
        """
        self.update(atoms)
        if self.__energy is None:
            print_error ("ERROR: (ParaGauss) no energy available")
            print_error ("Aborting.")
            raise Exception("ParaGauss: no energy available")
        return self.__energy * Hartree

    def get_forces(self, atoms):
        """
        Same as get_potential_energy() but for forces; units are
        transformed.
        """
        self.update(atoms)
        # FIX: "self.__grads == None" compared a numpy array
        # elementwise; the identity test is what is meant here:
        if self.__grads is None:
            print_error ("ERROR: (ParaGauss) no forces available!")
            print_error ("Try enabling geometry optimization, by setting OPERATIONS_GEO_OPT = true")
            print_error ("and setting MAX_GEO_ITERATION = 0")
            print_error ("Aborting.")
            raise Exception("ParaGauss: no forces available")
        # note that the forces are negative of the energy gradients:
        return -self.__grads * Hartree / Bohr

    def get_stress(self, atoms):
        # Stress is not provided by this interface.
        raise NotImplementedError

    def calculate (self, atoms):
        """
        Calculates the energy and forces with ParaGauss. Uses gxfile
        to comunicate with the rest of the system.
        """
        # read in actual positions and atomic numbers
        self.positions = atoms.get_positions().copy()
        atnums = atoms.get_atomic_numbers().copy()
        # FIX: identity test instead of elementwise numpy comparison:
        if self.atnums is None:
            self.atnums = atnums
        if len (atnums) != len (self.atnums) or any (array (atnums) != array (self.atnums)):
            # FIX: the message below used to be a bare tuple on its own
            # line and was never actually passed to print_error():
            print_error ("""
            ERROR: (ParaGauss) gxfile does not fit! Gxfile contains
            wrong atoms! Please delete or change it before restart.
            """)
            raise Exception ("gxfile does not fit, delete or adjust!")
        n = len(self.atnums)
        loop = 1
        # There may be a gxfile from another source; make sure it
        # contains the same metadata as our source:
        t_gx = {}
        if os.path.exists('gxfile'):
            atnums, __, t_gx["isyms"], t_gx["inums"], t_gx["iconns"], t_gx["ivars"], t_gx["additional"], __, __, loop = gxread('gxfile')
            for dat in self.data.keys():
                if (np2.asarray(self.data[dat]) != np2.array(t_gx[dat])).any():
                    print_error ("ERROR: (ParaGauss) gxfile does not fit!")
                    print_error ("ERROR: (ParaGauss) gxfile contains wrong " + dat +" !")
                    print_error ("Please delete or change it before restart")
                    raise Exception("gxfile does not fit, delete or adjust!")
            if (np2.array(atnums) != self.atnums).any():
                print_error ("ERROR: (ParaGauss) gxfile does not fit!")
                print_error ("ERROR: (ParaGauss) gxfile contains wrong atoms!")
                print_error ("Please delete or change it before restart")
                raise Exception("gxfile does not fit, delete or adjust!")
        # Needs not to know size of system at init, but soon the
        # per-atom metadata will be needed:
        if "isyms" not in self.data:
            if "isyms" in t_gx:
                self.data.update(t_gx)
            else:
                # 1 for a real atom, 0 for a dummy center:
                def dummy_or_not(at):
                    if at == 0:
                        return 0
                    else:
                        return 1
                self.data["isyms"] = np2.array([dummy_or_not(at) for at in atnums])
                self.data["inums"] = np2.array(range(1,n+1))
                self.data["iconns"] = np2.zeros((n,3))
                self.data["ivars"] = np2.zeros((n,3))
                self.data["additional"] = None
        # Create gxfile with actual geometry for calculation; units of
        # positions should be Bohrs in here, so they are changed:
        gxwrite(self.atnums, self.positions/Bohr, self.data["isyms"], self.data["inums"], self.data["iconns"],\
                self.data["ivars"], self.data["additional"], None, None, loop, file='gxfile' )
        input = basename(self.input)
        # FIXME: when copy_inp is True, we will occasionally overwrite
        # the user supplied input with the version we saved at
        # construction time over and over again. The danger is the
        # user may assume he/she can edit the input while the job is
        # running:
        copy_inp = (self.copy_input == "always") \
            or ((self.copy_input == "inexistent") and not isfile (input))
        if copy_inp:
            # This logic is to warn the user if he/she edits the file
            # we are supposed to overwrite. FIXME: race condition:
            if isfile (input):
                with open (input, "r") as inputfile:
                    if inputfile.read() != self.inputstring:
                        print_error ("WARNING: Changes in", input, "will be overwritten!")
                        print_error ("         Consider copy_input=\"inexistent\" or \"never\".")
            # (Over)writing input here. FIXME: should we skip that if
            # the content is already the same?
            with open (input, "w") as inputfile:
                inputfile.write (self.inputstring)
        if self.optimizer is not None:
            with open ("optimizer.input", "w") as optifile:
                optifile.write (self.optimizer)
        # The geometry file appears to be used for monitoring the
        # progress by the user. Write it before starting a potentially
        # long-running process. FIXME: we are supposed to "report"
        # also computed properties! Therefore we call this once again
        # after PG finishes:
        self.report (atoms, "ParaGauss.xyz")
        # The actual calculation starts about here. FIXME: at least
        # once I did a mistake of specifying the input in the command
        # line thus letting PG process the same input twice because it
        # is already appended here:
        cmd = self.cmdline + [input]
        if self.silence:
            stdout = open ("./ParaGauss.out", "w")
        else:
            stdout = sys.stdout
        subprocess.call (cmd, stdout=stdout)
        if self.silence:
            stdout.close()
        # Reads in new energy and forces:
        self.read()
        # Do it once again, this time also with the valid energy:
        self.report (atoms, "ParaGauss.xyz")
        self.converged = True

    def report (self, atoms, file):
        #
        # Report the energy (and the geometry currently calculated on)
        # after a finished calculation, in ASE units (xyz format).
        #
        symbols = atoms.get_chemical_symbols()
        natoms = len (symbols)
        with open (file, "w") as f:
            if self.__energy is not None:
                f.write ('%d\nE = %22.15f eV\n' % (natoms, self.__energy * Hartree))
            else:
                f.write ('%d\nno energy\n' % (natoms,))
            for s, (x, y, z) in zip (symbols, atoms.get_positions()):
                f.write ('%-2s %22.15f %22.15f %22.15f\n' % (s, x, y, z))

    def read(self):
        #
        # The interesting part to read in are the grads and energy,
        # the rest will be ignored afterwards.
        #
        # FIXME: Somehow we need (sometimes) to pass some time here,
        # before we can find the gxfile as output. This is especially
        # valid when running several calculations in parallel. It's
        # done this way and not with sleep as the problem seems to be
        # related to a file that should but isn't there. So it makes
        # sense to read all the files that are there, even if we don't
        # need the output.
        #
        os.system("ls > /dev/null")
        if isfile('o.' + basename(self.input) + '/trace_output'):
            # If there are some trace files keep content of them, as
            # if several calculations have been performed after one
            # another, only for the last iteration the trace would be
            # available:
            f = open('o.' + basename(self.input) + '/trace_output', "r")
            keep = f.read()
            f.close()
            f = open("keep_traces", "a")
            f.write(keep)
            f.close()
        if os.path.exists('gxfile'):
            __, __, __, __, __, __,__, self.__grads, self.__energy, loopi_d = gxread('gxfile')
            if self.__energy is not None:
                return
        else:
            print_error ("ParaGauss ERROR: Found no gxfile to read energy or forces from")
            print_error ("There should be at least the one I created")
            print_error ("Therefore something very strange happened")
            print_error ("ERROR: I quit!!")
            sys.exit(1)
        # Fall back to grepping the text output for the SCF energy:
        self.__energy = self.parse_output(self.output)

    def parse_output (self, output): # not actually using |self|
        """
        Currently only returns the SCF energy (the value of the last
        matching line, or None if no line matches).

        Energy lines in SCF section look like this:

        ^   e_sum  =       -1.521590696368    [   0.000000000000]
        """
        import re
        pattern = re.compile(r'\s*e_sum\s*=\s*(\S+)')
        # In case we dont find anything:
        e_sum = None
        with open (output,'r') as lines:
            for line in lines:
                match = pattern.search (line)
                if match is not None:
                    e_sum = float (match.group (1))
        return e_sum
class PG_nml():
    """One Fortran-style namelist of a ParaGauss input file."""
    def __init__( self, nml_name, nml_keys=None, nml_data=None ):
        # A PG namelist consists of
        # 1) a title (string)
        # 2) some key-value pairs (dictionary)
        # 3) an optional data-appendix (list, arbitrary type)
        #
        # FIX: the defaults used to be the mutable literals {} and [],
        # which are created once and shared between every instance
        # constructed without explicit arguments; use None sentinels.
        self.name = nml_name
        self.keys = {} if nml_keys is None else nml_keys
        self.data = [] if nml_data is None else nml_data
    def write( self ):
        """Render this namelist as a list of input-file lines."""
        # Leave whitespace for better readability
        inputtext = ['']
        # Namelist header
        inputtext += [ ' &' + self.name ]
        # Namelist entries.  str([val]) plus the replace() chain strips
        # the list syntax so scalars and sequences format uniformly.
        for key, val in self.keys.items():
            # Should allow every type
            inputtext += ['    ' + key + ' = '+''.join(str([val])).replace('[','').replace(',','').replace(']','').replace('\'','') ]
        # Conclude Namelist
        inputtext += [ ' /' + self.name ]
        # Data entries
        for line in self.data:
            inputtext += ['  '+''.join(str([line])).replace('[','').replace(',','').replace(']','') ]
        return inputtext
#
class PG_annotation():
    """A free-text comment line for a ParaGauss input file.

    Unlike PG_nml, an annotation carries no key/value structure --
    only a single string that is emitted verbatim.
    """
    def __init__( self, nml_text='#' ):
        # Default is a bare comment marker.
        self.text = nml_text
    def write( self ):
        """Return the annotation as a one-element list of input lines."""
        return [self.text]
#
class PG(Calculator):
    #
    # Alternative calculator, in a more ASE-like style to set up calculations quickly
    #
    # NOTE(review): this class relies on Python-2-only dict methods
    # (iteritems, has_key) and `from time import clock` (removed in
    # Python 3.8); it will not run under Python 3 as written.
    #
    def __init__( self
                , exe = "/home/soini/PROGRAMMING/paragauss_mac_working/bin/runpg /home/soini/PROGRAMMING/paragauss_mac_working/mainscf_4test_suite"
                ,**kwargs ):
        #
        # Keyword arguments are sorted into the typed dictionaries
        # below; an unknown keyword aborts via __pg_error__.
        #
        self.__cmd__ = exe
        # Boolean switches:
        self.flag_keys = {'uks'        : False
                         ,'jexact'     : False
                         ,'saveread_ks': True
                         ,'timers'     : False
                         }
        # Integer settings (SCF cycle limits, DIIS sizes, grid sizes):
        self.int_keys = {'max_scf'     : 20
                        ,'mix_beg'     : 5
                        ,'ndiis'       : 5
                        ,'nrad'        : 150
                        ,'nang'        : 291
                        }
        # Floating point settings (convergence thresholds, mixing):
        self.real_keys = {'e_conv'     : 1.0e-8
                         ,'d_conv'     : 1.0e-6
                         ,'scale_crit' : 1.0
                         ,'mix_fix'    : 0.25
                         ,'smear_val'  : 0.0
                         }
        # String settings; values carry literal double quotes because
        # they are written verbatim into the Fortran namelist input:
        self.str_keys = {'task'        : '"Gradients"'
                        ,'sym'         : '"C1"'
                        ,'rel'         : '"FALSE"'
                        ,'xc'          : '"PBE"'
                        ,'mix_scf'     : '"diis"'
                        ,'smear'       : '"FALSE"'
                        }
        # List-valued settings (equal-atom counts, basis set files):
        self.list_keys = {'ea'         : [1]
                         ,'basis'      : {}
                         }
        #
        for key, val in kwargs.iteritems():
            if self.flag_keys.has_key( key ):
                self.flag_keys[key] = val
            elif self.int_keys.has_key( key ):
                self.int_keys[key] = val
            elif self.real_keys.has_key( key ):
                self.real_keys[key] = val
            elif self.str_keys.has_key( key ):
                # Wrap user strings in the quotes the input format needs:
                self.str_keys[key] = '"' + val + '"'
            elif self.list_keys.has_key( key ):
                self.list_keys[key] = val
            else:
                self.__pg_error__( 'Illegal keyword '+key )
        #
        self.folder = 'o.input/'
        #
        self.atoms = None
        # Validate the mixing and smearing choices up front:
        self.mixing = self.__check__( self.str_keys['mix_scf'].lower(), ['"diis"', '"chargefit"'], 'mix_scf' )
        self.smear = self.__check__( self.str_keys['smear'].lower(), ['"false"', '"gauss"', '"fermi"', '"sinus"'], 'smear' ).replace('"','')
        self.scale_crit = 1.0
        #
        # Cached results of the last run (name-mangled attributes):
        self.__e_tot = None
        self.__forces = None
        #
        # Start with a fresh trace log for this calculator instance:
        os.system('rm -rf trace.log')
    #
    def set_atoms(self, atoms):
        # A new geometry invalidates the cached output.
        if (atoms != self.atoms):
            self.__got_output = False
            self.atoms = atoms.copy()
    #
    def get_potential_energy(self, atoms):
        """Return the last SCF total energy in eV (ASE units)."""
        from ase.units import Hartree
        self.update(atoms)
        if self.__got_output:
            energies = []
            # Collect every e_sum line; the last one is the converged value.
            for line in self.read( 'e_sum' ):
                if '[' in line:
                    energies += [float(line[2])]
            try:
                self.__e_tot = energies[-1]*Hartree
            except:
                self.__pg_error__( 'No energies found in output.\n Check Paragauss.out! ' )
        else:
            self.__pg_error__( 'Failed while trying to retrieve energy from output' )
        return self.__e_tot
    #
    def get_forces(self, atoms):
        """Return forces in eV/Angstrom, parsed from the output file."""
        from ase.units import Hartree, Bohr
        from numpy import zeros
        self.update(atoms)
        if self.__got_output:
            grads = zeros( (atoms.get_number_of_atoms(), 3) )
            i_run = 0
            print (" ")
            print (" Retrieving Gradients from output: ")
            # One "Equal Center:" line per atom carries its gradient.
            for line in self.read( 'Equal Center:' ):
                grads[i_run,0] = line[2]
                grads[i_run,1] = line[3]
                grads[i_run,2] = line[4]
                print (grads[i_run,0:2])
                i_run += 1
            # Forces are the negative gradients, converted to ASE units:
            self.__forces = - grads * Hartree / Bohr
            print (" ")
        else:
            self.__pg_error__( 'Failed while trying to retrieve forces from output' )
        return self.__forces
    #
    def get_stress(self, atoms):
        # Stress is not provided by this interface.
        raise NotImplementedError
    #
    def update( self, atoms ):
        """Rerun ParaGauss if the geometry changed or no output exists."""
        from os import path
        self.__scale_convergency_criteria__()
        if self.atoms != atoms or not self.__got_output:
            self.set_atoms( atoms )
            self.__write_input__( self.atoms )
            self.calculate()
        # We should have an output at this point
        if path.exists(self.folder+'output'):
            self.__got_output = True
        else:
            self.__pg_error__( 'No output in file output or o.input/output. Something went terribly wrong' )
    #
    def calculate( self ):
        """Invoke the ParaGauss executable on the generated input."""
        from os import path
        # NOTE(review): 'rm -rf' without an operand is a no-op;
        # presumably a target path was intended here -- confirm.
        os.system('rm -rf')
        cmd = self.__cmd__ + ' ' + 'input'
        cmd += ' > ParaGauss.out'
        # NOTE(review): the exit status is captured but never checked.
        tty = os.system(cmd)
        if path.exists( 'o.input' ):
            self.folder = 'o.input/'
        else:
            self.folder = ''
        if self.real_keys['scale_crit'] > 1.0:
            os.system('echo " Automatically setting convergency criteria to: e_conv = '+str(self.real_keys['e_conv']*self.scale_crit)+' d_conv = '+str(self.real_keys['d_conv']*self.scale_crit)+' " >> trace.log')
        os.system('cat '+self.folder+'trace_output >> trace.log')
    #
    def read( self, arg ):
        # Return the whitespace-split tokens of every output line
        # containing the substring `arg`.
        resultlines = []
        for line in open(self.folder+'output'):
            if arg in line:
                resultlines += [line.split()]
        return resultlines
    #
    def __write_input__( self, atoms ):
        # Validate symmetry and basis settings, then write the full
        # namelist input to the file 'input'.
        #
        self.p_ua, self.n_ua = self.check_sym( atoms )
        self.n_el, self.b_ua = self.check_bas( self.p_ua, atoms, self.list_keys['basis'] )
        #
        nmls = self.define_nmls( atoms )
        #
        input = open('input','w')
        for nml in nmls:
            for line in nml.write():
                input.write("%s\n" % line )
        input.close()
    #
    def __write_namelist__( self, namelist, entries ):
        # Render one namelist from a plain dict (unused alternative to
        # PG_nml.write; kept for reference).
        inputtext = [ '', ' &'+namelist ]
        for key, val in entries.iteritems():
            inputtext.append( '    '+key+' = '+str(val) )
        inputtext.append( ' /'+namelist )
        return inputtext
    #
    #
    def __check__( self, entry, allowed=[True,False], entryname='abcdefg' ):
        # Abort with a readable message unless `entry` is one of the
        # `allowed` values; returns `entry` unchanged on success.
        if entry not in allowed:
            print ('ERROR: only')
            for x in allowed:
                print (str(x))
            print (' allowed for '+str(entryname))
            sys.exit()
        return entry
    #
    def __scale_convergency_criteria__( self ):
        # Loosen the SCF convergence thresholds proportionally to the
        # current maximum force (bounded above by 0.1 / threshold).
        from ase.units import Hartree, Bohr
        if self.real_keys['scale_crit'] > 1.0:
            # NOTE(review): `!= None` on a numpy array compares
            # elementwise; `is not None` is what is intended -- confirm.
            if self.__forces != None:
                # Scale according to forces. Upper bound is 0.1, lower bound is e_conv and d_conv
                self.scale_crit = min([ 0.1/max([self.real_keys['e_conv'],self.real_keys['d_conv']])
                                      , max( [ 1.0, self.real_keys['scale_crit'] * self.__max_force__( self.__forces * Bohr / Hartree )] )] )
            else:
                # Scale assuming forces of 1.0
                self.scale_crit = self.real_keys['scale_crit']
    #
    def __max_force__( self, grads ):
        # Return the largest per-atom force magnitude (Euclidean norm).
        max_force = 0.0
        for grad in grads:
            max_force = max( [max_force, (grad[0]*grad[0]+grad[1]*grad[1]+grad[2]*grad[2])**0.5 ] )
        return max_force
    #
    def __pg_error__( self, msg ):
        # Print a framed error message and terminate the process.
        import sys
        print ()
        print (' #### ERROR: '+msg+' ####')
        print ()
        sys.exit()
    #
    def __point_groups__( self ):
        # All point-group labels accepted by ParaGauss (quoted for the
        # namelist format).
        return ['"C1"','"C2"','"C3"','"C4"','"C5"','"C6"','"C7"','"C8"','"C9"','"C10"','"Ci"'
               ,'"CS"','"S4"','"S6"','"S8"','"S10"','"S12"','"S14"','"S16"','"S18"','"S20"'
               ,'"D2"','"D3"','"D4"','"D5"','"D6"','"D7"','"D8"','"D9"','"D10"'
               ,'"D2H"','"D3H"','"D4H"','"D5H"','"D6H"','"D7H"','"D8H"','"D9H"','"D10H"','"Dinh"'
               ,'"D2D"','"D3D"','"D4D"','"D5D"','"D6D"','"D7D"','"D8D"','"D9D"','"D10D"'
               ,'"C2V"','"C3V"','"C4V"','"C5V"','"C6V"','"C7V"','"C8V"','"C9V"','"C10V"','"Cinv"'
               ,'"C2H"','"C3H"','"C4H"','"C5H"','"C6H"','"C7H"','"C8H"','"C9H"','"C10H"'
               ,'"O"','"T"','"OH"','"TH"','"TD"','"I"','"IH"']
    #
    def check_sym( self, atoms ):
        # Validate the point group and derive the index of the first
        # member of each unique-atom group (p_ua) plus the group count.
        #
        self.str_keys['sym'] = self.__check__( self.str_keys['sym'].upper()
                                             , allowed=self.__point_groups__()
                                             , entryname='sym' )
        #
        # For C1 (or a single atom at the origin) every atom is its own
        # unique-atom group:
        if self.str_keys['sym'] == '"C1"' or atoms.get_number_of_atoms() == 1 and sum(abs(atoms.get_positions())) == 0.0:
            self.list_keys['ea'] = [1]*atoms.get_number_of_atoms()
        #
        p_ua = [None]*len( self.list_keys['ea'] )
        i_run = 0
        for i_ua in range(len(p_ua)):
            p_ua[i_ua] = i_run
            i_run += self.list_keys['ea'][i_ua]
        # The equal-atom counts must add up to the total atom count:
        self.__check__( i_run, [atoms.get_number_of_atoms()], entryname='p_ua' )
        n_ua = len( self.list_keys['ea'] )
        return p_ua, n_ua
    #
    def check_bas( self, p_ua, atoms, bas_raw ):
        from os import path
        # check given basis for consistency
        el = {}
        bl = []
        for i_run in p_ua:
            symbol = atoms.get_chemical_symbols()[i_run].lower()
            # count elements
            try:
                el[symbol] += 1
            except:
                el[symbol] = 1
            #
            # Every element needs a basis-set file that actually exists:
            if bas_raw.has_key( symbol ):
                basisfilename = bas_raw[symbol]
                if path.exists( basisfilename ):
                    bl += [basisfilename]
                else:
                    self.__pg_error__( 'Basis set file '+basisfilename+' missing' )
            else:
                self.__pg_error__( 'Need basis set for element '+symbol )
        return len( el ), bl
    #
    def define_nmls( self, atoms ):
        from time import clock
        from os import path
        # Define input as a list of PG_nml types.
        # Where applicable, pass input through __check__ filter
        #
        # Add header for section
        head1 = PG_annotation( '\n#'+('# define calculation #').center(80,'~')+'#' )
        #
        # namelist TASKS
        tasks = PG_nml( 'tasks',
                        { 'task': self.__check__( self.str_keys['task'].lower(), ['"singlepoint"','"gradients"'], 'task' ) } )
        #
        # namelist MAIN_OPTIONS
        maino = PG_nml( 'main_options'
                      , { 'spin_restricted'      : not self.flag_keys['uks']
                        , 'relativistic'         : self.__check__( self.str_keys['rel'].lower(), ['"true"','"false"','"adkh"'], 'rel' )
                        , 'perturbation_theory'  : self.__check__( self.str_keys['mix_scf'].lower(), ['"diis"', '"chargefit"'], 'mix_scf' ) == '"chargefit"'
                        # override internal ParaGauss defaults !!
                        , 'integrals_on_file'    : 'False # predefined by PG-calculator' } )
        #
        # NAMELIST OUTPUT TIMING
        timers = PG_nml( 'output_timing'
                       , { 'output_timing_summary'            : self.flag_keys['timers']
                         , 'output_timing_detailedsummary'    : self.flag_keys['timers']
                         , 'output_timing_integrals'          : self.flag_keys['timers']
                         , 'output_timing_detailedintegrals'  : self.flag_keys['timers']
                         , 'output_timing_scfloops'           : self.flag_keys['timers']
                         , 'output_timing_scf'                : self.flag_keys['timers']
                         , 'output_timing_detailedscf'        : self.flag_keys['timers']
                         , 'output_timing_post_scf'           : self.flag_keys['timers']
                         , 'output_timing_detailedpostscf'    : self.flag_keys['timers']
                         , 'output_timing_slaves'             : self.flag_keys['timers']
                         , 'output_timing_interrupts'         : self.flag_keys['timers'] } )
        #
        # NAMELIST RECOVER_OPTIONS
        recoo = PG_nml( 'recover_options'
                      , { 'save_ksmatrix' : self.flag_keys['saveread_ks']
                        , 'read_ksmatrix' : self.flag_keys['saveread_ks'] and path.exists('saved_ksmatrix.dat') } )
        #
        if self.mixing == '"diis"':
            # NAMELIST MIXING (static mixing effectively disabled;
            # DIIS takes over)
            mixin = PG_nml( 'mixing'
                          , { 'chmix'             : 1.0
                            , 'spmix'             : 1.0
                            , 'xcmix'             : 1.0
                            , 'start_after_cycle' : 1000000 } )
            #
            # NAMELIST DIIS
            diis = PG_nml( 'diis'
                         , { 'diis_on'    : self.int_keys['ndiis'] > 0
                           , 'mmax'       : self.int_keys['ndiis']
                           , 'loop_start' : self.int_keys['mix_beg']
                           , 'threshold'  : 0.15
                           , 'cfix'       : self.real_keys['mix_fix'] } )
        else:
            # Charge-fit mixing: fixed mixing factor, empty DIIS block.
            mixin = PG_nml( 'mixing'
                          , { 'chmix'             : self.real_keys['mix_fix']
                            , 'spmix'             : 1.0
                            , 'xcmix'             : 1.0
                            , 'start_after_cycle' : self.int_keys['mix_beg'] } )
            diis = PG_nml( 'diis' , { } )
        #
        # NAMELIST CONVERGENCE_LIST
        convl = PG_nml( 'convergence_list'
                      , { 'max_iteration'      : self.int_keys['max_scf']
                        , 'energy_criterion'   : self.real_keys['e_conv']*self.scale_crit
                        , 'density_criterion'  : self.real_keys['d_conv']*self.scale_crit
                        , 'energy_dev_checked' : 3 } )
        #
        # NAMELIST XC_CONTROL
        xccnt = PG_nml( 'xc_control'
                      , { 'xc': self.str_keys['xc'] } )
        #
        # NAMELIST FERMI
        # NOTE(review): `smear` is only bound when this condition holds;
        # with the defaults (smear_val == 0.0, smear == 'FALSE') the
        # return statement below raises NameError -- confirm intended
        # behavior before fixing.
        if self.real_keys['smear_val'] > 0.0 or self.smear != 'FALSE':
            smear = PG_nml( 'fermi'
                          , { 'fermi_'+self.smear : 'true'
                            , 'fermi_sigma'       : self.real_keys['smear_val'] } )
        #
        # NAMELIST ERI4C
        eri4c = PG_nml( 'eri4c'
                      , { 'j_exact': self.flag_keys['jexact'] } )
        #
        head2 = PG_annotation( '\n#'+('# define system #').center(80,'~')+'#' )
        #
        # NAMELIST SYMMETRY_GROUP
        symgr = PG_nml( 'symmetry_group'
                      , { 'point_group': self.str_keys['sym'] } )
        #
        # NAMELIST UNIQUE_ATOM_NUMBER
        uanum = PG_nml( 'unique_atom_number'
                      , { 'n_unique_atoms': self.n_ua } )
        #
        uanml = []
        for i_ua in range(self.n_ua):
            i_run = self.p_ua[i_ua]
            #
            # NAMELIST N_UNIQUE_ATOMS (positions are written in Bohr)
            uanml += [ PG_nml( 'unique_atom # '+str(i_ua+1)
                             , { 'name'         : '"'+atoms.get_chemical_symbols()[i_run]+'"'
                               , 'z'            : str(atoms.get_atomic_numbers()[i_run])+'.0'
                               , 'n_equal_atoms': self.list_keys['ea'][i_ua] }
                             , [list(atoms.get_positions()[i_run]/Bohr)] ) ]
        #
        head3 = PG_annotation( '\n#'+('# define grid and basis #').center(80,'~')+'#' )
        #
        # NAMELIST GRID
        grid = PG_nml( 'grid', { 'sym_reduce': True } )
        #
        ganml = []
        for i_ua in range(self.n_ua):
            # NAMELIST GRIDATOM
            ganml += [ PG_nml( 'gridatom # '+str(i_ua+1)
                             , { 'nrad': self.int_keys['nrad']
                               , 'nang': self.int_keys['nang'] } ) ]
        #
        blist = []
        for i_ua in range(self.n_ua):
            # GIVE BASIS SET FILE # no explicit namelists for now
            blist +=[ PG_annotation( '\n~'+self.b_ua[i_ua] ) ]
        #
        # FINAL LINE
        final = PG_annotation( '\n#'+('# compiled at '+str(clock())+' #').center(80,'~')+'#\n' )
        #
        return [ head1, tasks, maino, timers, recoo, mixin, diis, convl, smear, xccnt, eri4c
               , head2, symgr, uanum ]+uanml+[
                 head3, grid]+ganml+blist+[ final ]
|
alexei-matveev/ase-local
|
ase/calculators/paragauss.py
|
Python
|
gpl-2.0
| 30,147
|
[
"ASE"
] |
8485517205f4b170a76b100021559e181af1e31fbf1e8949a9d466a1c37c7585
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import collections
from pymatgen.core.units import (
ArrayWithUnit,
Energy,
EnergyArray,
FloatWithUnit,
Length,
LengthArray,
Mass,
Memory,
Time,
TimeArray,
Unit,
UnitError,
unitized,
)
from pymatgen.util.testing import PymatgenTest
class UnitTest(PymatgenTest):
    """Tests for composition and string rendering of Unit objects."""

    def test_init(self):
        # A unit built from (symbol, power) pairs renders with ^ powers.
        velocity = Unit((("m", 1), ("s", -1)))
        self.assertEqual(str(velocity), "m s^-1")
        # A parsed string spec collapses to the derived SI unit (Joule).
        energy = Unit("kg m ^ 2 s ^ -2")
        self.assertEqual(str(energy), "J")
        # Products and quotients simplify where a derived unit exists.
        self.assertEqual(str(velocity * energy), "J m s^-1")
        self.assertEqual(str(energy / velocity), "J s m^-1")
        self.assertEqual(str(velocity / Unit("m")), "Hz")
        self.assertEqual(str(velocity * Unit("s")), "m")
        # kg * (m s^-2) is a Newton; times m gives the torque-like N m.
        acceleration = velocity / Unit("s")
        force = Unit("kg") * acceleration
        self.assertEqual(str(force * Unit("m")), "N m")
class FloatWithUnitTest(PymatgenTest):
    """Unit conversion, arithmetic and decorator tests for FloatWithUnit."""
    def test_energy(self):
        # eV -> Ha and J -> eV conversions.
        a = Energy(1.1, "eV")
        b = a.to("Ha")
        self.assertAlmostEqual(b, 0.0404242579378)
        c = Energy(3.14, "J")
        self.assertAlmostEqual(c.to("eV"), 1.9598338493806797e19)
        # Constructing an Energy with a length unit must fail.
        self.assertRaises(UnitError, Energy, 1, "m")
        d = Energy(1, "Ha")
        # Mixed-unit arithmetic is expressed in the left operand's unit.
        self.assertAlmostEqual(a + d, 28.311386245987997)
        self.assertAlmostEqual(a - d, -26.111386245987994)
        self.assertEqual(a + 1, 2.1)
        # Division keeps both units in the result's unit string.
        self.assertEqual(str(a / d), "1.1 eV Ha^-1")
        e = Energy(1, "kJ")
        f = e.to("kCal")
        self.assertAlmostEqual(f, 0.2390057361376673)
        self.assertEqual(str(e + f), "2.0 kJ")
        self.assertEqual(str(f + e), "0.4780114722753346 kCal")
    def test_time(self):
        a = Time(20, "h")
        self.assertAlmostEqual(float(a.to("s")), 3600 * 20)
        # Test left and right multiplication.
        b = a * 3
        self.assertAlmostEqual(float(b), 60.0)
        self.assertEqual(str(b.unit), "h")
        self.assertEqual(float(3 * a), 60.0)
        # Half a day expressed in seconds.
        a = Time(0.5, "d")
        self.assertAlmostEqual(float(a.to("s")), 3600 * 24 * 0.5)
    def test_length(self):
        x = Length(4.2, "ang")
        self.assertAlmostEqual(x.to("cm"), 4.2e-08)
        self.assertEqual(x.to("pm"), 420)
        self.assertEqual(str(x / 2), "2.1 ang")
        # Powers propagate to the unit (ang^3).
        y = x ** 3
        self.assertAlmostEqual(y, 74.088)
        self.assertEqual(str(y.unit), "ang^3")
    def test_memory(self):
        mega = Memory(1, "Mb")
        self.assertEqual(mega.to("byte"), 1024 ** 2)
        # Unit parsing is case-insensitive here.
        self.assertEqual(mega, Memory(1, "mb"))
        same_mega = Memory.from_string("1Mb")
        self.assertEqual(same_mega.unit_type, "memory")
        other_mega = Memory.from_string("+1.0 mb")
        self.assertEqual(mega, other_mega)
    def test_unitized(self):
        # The decorator attaches the unit to every element of the
        # returned container, preserving the container type.
        @unitized("eV")
        def f():
            return [1, 2, 3]
        self.assertEqual(str(f()[0]), "1.0 eV")
        self.assertIsInstance(f(), list)
        @unitized("eV")
        def g():
            return 2, 3, 4
        self.assertEqual(str(g()[0]), "2.0 eV")
        self.assertIsInstance(g(), tuple)
        @unitized("pm")
        def h():
            d = collections.OrderedDict()
            for i in range(3):
                d[i] = i * 20
            return d
        self.assertEqual(str(h()[1]), "20.0 pm")
        self.assertIsInstance(h(), collections.OrderedDict)
        # An already-unitized return value is converted, not re-wrapped.
        @unitized("kg")
        def i():
            return FloatWithUnit(5, "g")
        self.assertEqual(i(), FloatWithUnit(0.005, "kg"))
        @unitized("kg")
        def j():
            return ArrayWithUnit([5, 10], "g")
        j_out = j()
        self.assertEqual(j_out.unit, Unit("kg"))
        self.assertEqual(j_out[0], 0.005)
        self.assertEqual(j_out[1], 0.01)
    def test_compound_operations(self):
        # Derived units compose through arithmetic: kg m s^-2 -> N.
        g = 10 * Length(1, "m") / (Time(1, "s") ** 2)
        e = Mass(1, "kg") * g * Length(1, "m")
        self.assertEqual(str(e), "10.0 N m")
        # Per-mole to per-atom energy conversion.
        form_e = FloatWithUnit(10, unit="kJ mol^-1").to("eV atom^-1")
        self.assertAlmostEqual(float(form_e), 0.103642691905)
        self.assertEqual(str(form_e.unit), "eV atom^-1")
        # Incompatible target units must raise.
        self.assertRaises(UnitError, form_e.to, "m s^-1")
        a = FloatWithUnit(1.0, "Ha^3")
        b = a.to("J^3")
        self.assertAlmostEqual(b, 8.28672661615e-53)
        self.assertEqual(str(b.unit), "J^3")
        a = FloatWithUnit(1.0, "Ha bohr^-2")
        b = a.to("J m^-2")
        self.assertAlmostEqual(b, 1556.8931028218924)
        self.assertEqual(str(b.unit), "J m^-2")
    def test_as_base_units(self):
        # MPa decomposes to the SI base-derived Pa.
        x = FloatWithUnit(5, "MPa")
        self.assertEqual(FloatWithUnit(5000000, "Pa"), x.as_base_units)
class ArrayWithFloatWithUnitTest(PymatgenTest):
    """Mirror of FloatWithUnitTest for the array-valued counterparts."""
    def test_energy(self):
        """
        Similar to FloatWithUnitTest.test_energy.
        Check whether EnergyArray and FloatWithUnit have same behavior.
        # TODO
        One can merge the two tests easily:
        for obj in [Energy, EnergyArray]:
            a = obj(...)
            self.assert(...)
        """
        a = EnergyArray(1.1, "eV")
        b = a.to("Ha")
        self.assertAlmostEqual(float(b), 0.0404242579378)
        c = EnergyArray(3.14, "J")
        # Reduced precision (places=5) for the large-magnitude value.
        self.assertAlmostEqual(float(c.to("eV")), 1.9598338493806797e19, 5)
        # self.assertRaises(ValueError, Energy, 1, "m")
        d = EnergyArray(1, "Ha")
        self.assertAlmostEqual(float(a + d), 28.311386245987997)
        self.assertAlmostEqual(float(a - d), -26.111386245987994)
        self.assertEqual(float(a + 1), 2.1)
    def test_time(self):
        """
        Similar to FloatWithUnitTest.test_time.
        Check whether EnergyArray and FloatWithUnit have same behavior.
        """
        # here there's a minor difference because we have a ndarray with
        # dtype=np.int_.
        a = TimeArray(20, "h")
        self.assertAlmostEqual(a.to("s"), 3600 * 20)
        # Test left and right multiplication.
        self.assertEqual(str(a * 3), "60 h")
        self.assertEqual(str(3 * a), "60 h")
    def test_length(self):
        """
        Similar to FloatWithUnitTest.test_time.
        Check whether EnergyArray and FloatWithUnit have same behavior.
        """
        x = LengthArray(4.2, "ang")
        self.assertAlmostEqual(float(x.to("cm")), 4.2e-08)
        self.assertEqual(float(x.to("pm")), 420)
        self.assertEqual(str(x / 2), "2.1 ang")
    def test_array_algebra(self):
        # Every arithmetic combination of unit-carrying arrays must
        # itself carry a unit; scalar indexing drops it.
        ene_ha = EnergyArray([1, 2], "Ha")
        ene_ev = EnergyArray([1, 2], "eV")
        time_s = TimeArray([1, 2], "s")
        e1 = ene_ha.copy()
        e1 += 1
        e2 = ene_ha.copy()
        e2 -= 1
        e3 = ene_ha.copy()
        # e3 /= 2
        e4 = ene_ha.copy()
        e4 *= 2
        objects_with_unit = [
            ene_ha + ene_ev,
            ene_ha - ene_ev,
            3 * ene_ha,
            ene_ha * 3,
            ene_ha / 3,
            3 / ene_ha,
            ene_ha * time_s,
            ene_ha / ene_ev,
            ene_ha.copy(),
            ene_ha[0:1],
            e1,
            e2,
            e3,
            e4,
        ]
        for i, obj in enumerate(objects_with_unit):
            self.assertTrue(hasattr(obj, "unit"))
        objects_without_unit = [
            # Here we could return a FloatWithUnit object but I prefer this
            # a bare scalar since FloatWithUnit extends float while we could
            # have an int.
            ene_ha[0],
        ]
        for obj in objects_without_unit:
            self.assertFalse(hasattr(obj, "unit"))
        # Adding quantities of different unit types must raise.
        with self.assertRaises(UnitError):
            ene_ha + time_s
    def test_factors(self):
        # Conversions keep the target unit in the string representation.
        e = EnergyArray([27.21138386, 1], "eV").to("Ha")
        self.assertTrue(str(e).endswith("Ha"))
        l = LengthArray([1.0], "ang").to("bohr")
        self.assertTrue(str(l).endswith(" bohr"))
        v = ArrayWithUnit([1, 2, 3], "bohr^3").to("ang^3")
        self.assertTrue(str(v).endswith(" ang^3"))
    def test_as_base_units(self):
        # MPa array decomposes elementwise to Pa.
        x = ArrayWithUnit([5, 10], "MPa")
        self.assertArrayEqual(ArrayWithUnit([5000000, 10000000], "Pa"), x.as_base_units)
class DataPersistenceTest(PymatgenTest):
    def test_pickle(self):
        """Test whether FloatWithUnit and ArrayWithUnit support pickle"""
        for cls in [FloatWithUnit, ArrayWithUnit]:
            a = cls(1, "eV")
            b = cls(10, "N bohr")
            objects = [a, b]
            # serialize_with_pickle (from PymatgenTest) round-trips the
            # objects through every available pickle protocol.
            new_objects_from_protocol = self.serialize_with_pickle(objects)
            for new_objects in new_objects_from_protocol:
                for old_item, new_item in zip(objects, new_objects):
                    # String equality implies both value and unit survived.
                    self.assertTrue(str(old_item) == str(new_item))
if __name__ == "__main__":
    # Allow running this test module directly: python test_units.py
    import unittest
    unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/core/tests/test_units.py
|
Python
|
mit
| 8,777
|
[
"pymatgen"
] |
60658d34947f00f070974808783dca745f3ba3798b46006fa52ce23336fe82bf
|
"""
A first test for the ELBO on the diffusion problem.
The target is composed of a prior and a Gaussian likelihood.
The approximating mixture has two components.
Author:
Panagiotis Tsilifis
Date:
6/16/2014
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import cPickle as pickle
from scipy.stats.distributions import norm
import math
from vuq import GammaPDF
from vuq import UniformND
from vuq import PDFCollection
from vuq import IsotropicGaussianLikelihood
from vuq import MultivariateNormal
from vuq import Joint
from vuq import MixturePDF
from vuq import MixtureOfMultivariateNormals
from vuq import FirstOrderEntropyApproximation
from vuq import ThirdOrderExpectationFunctional
from vuq import EvidenceLowerBound
from vuq import Optimizer
import sys
sys.path.insert(0,'demos/')
from diffusion import ContaminantTransportModelUpperLeft
# Number of dimensions
num_dim = 3
# The number of components to use for the mixture
# NOTE(review): this constant is not referenced below -- two components are
# constructed explicitly for ``log_q`` -- confirm whether it is stale.
num_comp = 1
#-------- The (hypothetical) joint distribution ----------------
# The prior: two uniform spatial inputs plus a Gamma-distributed third input.
collection = [UniformND(1), UniformND(1), GammaPDF(1,0.05,1)]
prior = PDFCollection(collection)
# The data
data = np.load('data_concentrations_upperleft_corner.npy')
# The forward model
diff_model = ContaminantTransportModelUpperLeft()
print 'Num_input'
print str(diff_model.num_input) + '\n'
# The isotropic Likelihood
IsotropicL = IsotropicGaussianLikelihood(data[:], diff_model)
# The joint: likelihood times prior.
log_p = Joint(IsotropicL, prior)
print 'Target:'
print str(log_p)
# The approximating distribution: two Gaussian components with randomly
# drawn (Gamma) initial parameters.
comp = [MultivariateNormal(np.random.gamma(10,1,num_dim)), MultivariateNormal(np.random.gamma(10,1,num_dim))]
# MultivariateNormal(np.random.gamma(10,1,num_dim))]#, MultivariateNormal(np.random.gamma(10,1,num_dim))]
log_q = MixtureOfMultivariateNormals(comp)
# Start the two components at the lower/upper parts of the unit interval.
log_q.comp[0].mu = np.ones(log_q.comp[0].mu.shape) * 0.25
log_q.comp[1].mu = np.ones(log_q.comp[0].mu.shape) * 0.75
#log_q.comp[2].mu = np.ones(log_q.comp[2].mu.shape) * 0.4
#log_q.comp[3].mu = np.ones(log_q.comp[3].mu.shape) * 0.6
# Small isotropic initial covariances.
log_q.comp[0].C = np.eye(num_dim) * 1e-4
log_q.comp[1].C = np.eye(num_dim) * 1e-4
#log_q.comp[2].C = np.eye(num_dim) * 1e-4
#log_q.comp[3].C = np.eye(num_dim) * 1e-4
print 'Initial:'
print log_q
# Pick an entropy approximation
entropy = FirstOrderEntropyApproximation()
# Pick an approximation for the expectation of the joint
expectation_functional = ThirdOrderExpectationFunctional(log_p)
# Restrictions for mu: the first num_dim - 1 inputs stay in [0, 1]; the
# last one only has to stay positive.
mu_bounds = (tuple((0., 1.) for i in xrange(log_q.num_dim - 1))
             + ((1e-6, None), ))
# Keep every diagonal covariance entry strictly positive.
C_bounds = tuple((1e-32, None) for i in xrange(log_q.num_comp * log_q.num_dim))
# Build the ELBO
elbo = EvidenceLowerBound(entropy, expectation_functional)
print 'ELBO:'
print str(elbo)
# Optimize the elbo
optimizer = Optimizer(elbo)
# Cache the optimization result so repeated runs skip the expensive step.
results_file = os.path.join('demos', 'diffusion_upleft_cali.pcl')
if os.path.exists(results_file):
    print 'I found:', results_file
    print 'I am skipping the experiment.'
    print 'Delete the file if you want to repeat it.'
    with open(results_file, 'rb') as fd:
        results = pickle.load(fd)
    L = results['L']
    log_q = results['log_q']
else:
    # Maximize the ELBO; L is the trace of ELBO values over iterations and
    # log_q is updated in place.
    L = optimizer.optimize(log_q, tol=1e-3, max_it=10, mu_bounds=mu_bounds,
                           mu_constraints=None, C_bounds=C_bounds)
    result = {}
    result['L'] = L
    result['log_q'] = log_q
    with open(os.path.join('demos', 'diffusion_upleft_cali.pcl'), 'wb') as fd:
        pickle.dump(result, fd)
# Plot the ELBO trace over iterations.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(L, linewidth=2)
ax.set_xlabel('Iteration', fontsize=16)
ax.set_ylabel('ELBO', fontsize=16)
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
png_file = os.path.join('figures', 'diffusion_upleft_elbo.png')
print 'Writing:', png_file
plt.savefig(png_file)
# Report mean +/- one standard deviation of each input for the first
# component; the first two inputs are named x_{i}, the third sigma^2.
for i in xrange(log_q.num_dim):
    mu = log_q.comp[0].mu[i]
    s = math.sqrt(log_q.comp[0].C[i, i])
    if i < 2:
        name = 'x_{%s}' % (i+1)
    else:
        name = 'sigma^2'
    print name, '=', mu, '+-', s
# Plot the calibration result
t = np.array([ 0.075, 0.15, 0.225, 0.3])
fig = plt.figure()
ax = fig.add_subplot(111)
# Model prediction at the posterior mean (first component, spatial inputs).
f = diff_model._eval_u(log_q.comp[0].mu[:2])
Y = f.reshape(4, 1)
data = data.reshape(4, 1)
styles = ['b']
ax.plot(t, Y[:, 0], styles[0], linewidth=2)
ax.plot(t, data[:,0], '+' + styles[0], markersize=10, markeredgewidth=2)
ax.set_xlabel('Time (t)', fontsize=16)
ax.set_ylabel('Concentration', fontsize=16)
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
png_file = os.path.join('figures', 'diffusion_upleft_cali_output.png')
print 'Writing:', png_file
plt.savefig(png_file)
# Do an uncertainty propagation test.
uq_file = os.path.join('demos', 'diffusion_upleft_cali_uq.pcl')
if os.path.exists(uq_file):
    # Reuse cached Monte Carlo percentiles if available.
    with open(uq_file, 'rb') as fd:
        uq_results = pickle.load(fd)
    Y_m = uq_results['Y_m']
    Y_p05 = uq_results['Y_p05']
    Y_p95 = uq_results['Y_p95']
else:
    # Monte Carlo: sample inputs from the approximate posterior, push them
    # through the forward model, add scaled Gaussian noise, take percentiles.
    num_mcmc = 100
    Y_s = []
    for i in xrange(num_mcmc):
        print 'taking sample', i + 1
        omega = log_q.sample().flatten()
        x = omega[:2]
        # Third sampled input scales the observation noise -- presumably the
        # likelihood's noise parameter; confirm against IsotropicGaussianLikelihood.
        sigma = omega[2]
        y = diff_model._eval_u(x)
        Y_s.append(y + sigma * np.random.randn(*y.shape))
    Y_s = np.vstack(Y_s)
    Y_m = np.percentile(Y_s, 50, axis=0).reshape(Y.shape)
    Y_p05 = np.percentile(Y_s, 5, axis=0).reshape(Y.shape)
    Y_p95 = np.percentile(Y_s, 95, axis=0).reshape(Y.shape)
    uq_results = {}
    uq_results['Y_m'] = Y_m
    uq_results['Y_p05'] = Y_p05
    uq_results['Y_p95'] = Y_p95
    with open(uq_file, 'wb') as fd:
        pickle.dump(uq_results, fd)
# Plot the predictive median and the 5%-95% band against the data.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, Y_m[:, 0], styles[0], linewidth=2)
ax.fill_between(t, Y_p05[:, 0], Y_p95[:, 0], color=styles[0], alpha=0.5)
ax.plot(t, data[:, 0], '+' + styles[0], markersize=10,
        markeredgewidth=2)
ax.set_xlabel('Time (t)', fontsize=16)
ax.set_ylabel('Concentration', fontsize=16)
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
png_file = os.path.join('figures', 'diffusion_upleft_cali_uq.png')
print 'Writing:', png_file
plt.savefig(png_file)
print str(log_q)
comp_0 = [MultivariateNormal(log_q.comp[0].mu[:2]), MultivariateNormal(log_q.comp[1].mu[:2]),
MultivariateNormal(log_q.comp[2].mu[:2]), MultivariateNormal(log_q.comp[3].mu[:2])]
mixture_0 = MixtureOfMultivariateNormals(comp_0)
mixture_0.comp[0].C = log_q.comp[0].C[:2,:2]
mixture_0.comp[1].C = log_q.comp[1].C[:2,:2]
mixture_0.comp[2].C = log_q.comp[2].C[:2,:2]
mixture_0.comp[3].C = log_q.comp[3].C[:2,:2]
x_0 = np.linspace(0.05,0.75,150)[:,None]
X1, X2 = np.meshgrid(x_0, x_0)
XX = np.hstack([X1.flatten()[:,None], X2.flatten()[:, None]])
Z = mixture_0(XX)
Z = Z.reshape(X1.shape)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.contourf(X1, X2, np.exp(Z))
cbar = fig.colorbar(cax)
png_file = os.path.join('figures', 'diffusion_upleft_mixture.png')
print 'Writing: ', png_file
#plt.show()
plt.savefig(png_file)
|
ebilionis/variational-reformulation-of-inverse-problems
|
unittests/test_optimize_diffusion_upperleft.py
|
Python
|
gpl-2.0
| 7,014
|
[
"Gaussian"
] |
f5c370063e7e7f3576f2010653e414d2e8b00dd4df7c4089ed4f96644d5001b7
|
# coding: utf-8
from StringIO import StringIO
import unicodecsv
from grano.logic import Loader
from grano.logic.schemata import import_schema
# This source URL will be applied to all properties without their own lineage:
DEFAULT_SOURCE_URL = 'http://www.opennews.org/'
DATA = """fellow_name,twitter_handle,start_date,end_date,organization_name,organization_url
Ben Chartoff,,2014-02-01,2014-12-01,Washington Post,http://www.washingtonpost.com/
Mark Boas,maboas,2012-02-01,2012-12-01,Al Jazeera English,http://www.aljazeera.com/
Noah Veltman,veltman,2013-02-01,2013-12-01,BBC,http://www.bbc.co.uk
Laurian Gridinoc,gridinoc,2012-02-01,2012-12-01,BBC,http://www.bbc.co.uk
Sonya Song,sonya2song,2013-02-01,2013-12-01,Boston Globe,http://www.bostonglobe.com/
Dan Schultz,slifty,2012-02-01,2012-12-01,Boston Globe,http://www.bostonglobe.com/
Gabriela Rodriguez,gaba,2014-02-01,2014-12-01,La Nacion,http://www.lanacion.com
Manuel Aristarán,manuelaristaran,2013-02-01,2013-12-01,La Nacion,http://www.lanacion.com
Harlo Holmes,harlo,2014-02-01,2014-12-01,New York Times,http://www.nytimes.com
Brian Abelson,brianabelson,2013-02-01,2013-12-01,New York Times,http://www.nytimes.com
Brian Jacobs,btjakes,2014-02-01,2014-12-01,ProPublica,http://www.propublica.org
Mike Tigas,mtigas,2013-02-01,2013-12-01,ProPublica,http://www.propublica.org
Friedrich Lindenberg,pudo,2013-02-01,2013-12-01,Spiegel Online,http://www.spiegel.de/
Marcos Vanetta,malev,2014-02-01,2014-12-01,Texas Tribune,http://www.texastribune.org/
Stijn Debrouwere,stdbrouw,2013-02-01,2013-12-01,The Guardian,http://www.theguardian.com/uk
Nicola Hughes,DataMinerUK,2012-02-01,2012-12-01,The Guardian,http://www.theguardian.com/uk
Aurelia Moser,auremoser,2014-02-01,2014-12-01,Ushahidi / Internews Kenya,http://www.ushahidi.com/
Annabel Church,annabelchurch,2013-02-01,2013-12-01,Zeit Online,http://www.zeit.de/index
Cole Gillespie,theCole,2012-02-01,2012-12-01,Zeit Online,http://www.zeit.de/index"""
SCHEMATA = """
- name: fellow
label: An OpenNews fellow
obj: entity
hidden: no
attributes:
- name: twitter_handle
label: Twitter handle
- name: news_organization
label: A news organization
obj: entity
hidden: no
attributes:
- name: url
label: URL
- name: fellowship
label: A Fellowship
label_in: Was hosted by
label_out: Worked for
obj: relation
attributes:
- name: start_date
label: Start date
- name: end_date
label: End date
"""
def create_fixtures():
    """Load the OpenNews fellowship fixture data into a grano project.

    Creates the 'opennews' project, imports the schemata, then for every
    CSV row in DATA creates a fellow entity, a news-organization entity and
    a fellowship relation linking them, and finally persists everything
    through the loader.
    """
    loader = Loader('opennews', project_label='Open News',
                    project_settings={},
                    source_url=DEFAULT_SOURCE_URL)
    import_schema(loader.project, StringIO(SCHEMATA))
    reader = unicodecsv.reader(StringIO(DATA))
    # Skip the CSV header row.  next(reader) is equivalent to the original
    # reader.next() but also works on Python 3.
    next(reader)
    for record in reader:
        fellow = loader.make_entity(['fellow'])
        fellow.set('name', record[0])
        fellow.set('twitter_handle', record[1])
        fellow.save()
        news_org = loader.make_entity(['news_organization'])
        news_org.set('name', record[4])
        news_org.set('url', record[5])
        news_org.save()
        fellowship = loader.make_relation('fellowship', fellow, news_org)
        fellowship.set('start_date', record[2])
        fellowship.set('end_date', record[3])
        fellowship.save()
    loader.persist()
|
clkao/grano
|
grano/test/fixtures.py
|
Python
|
mit
| 3,340
|
[
"Brian"
] |
f1044d5d1e80ea4a193aa10ba4004983be0f3f88a92b3f9f9fc04114d3a43eb2
|
#!/usr/bin/python
# check_glider_netcdf.py - Verifies that a glider NetCDF file from a provider
# contains all the required global attributes, dimensions, scalar variables
# and dimensioned variables. Prints out missing items.
#
# Returns:
# 0 - File complies to NGDAC standard
# 1+ - Number of errors
#
# By: Michael Lindemuth <mlindemu@usf.edu>
# University of South Florida
# College of Marine Science
# Ocean Technology Group
import argparse
import sys
from os import path
import json
from netCDF4 import Dataset
def test_global_attributes(nc, requirements):
""" Tests for required global attributes
"""
retVal = 0
global_attributes = nc.ncattrs()
for req_attribute in requirements['global_attributes']:
if req_attribute not in global_attributes:
print "Global Attribute Missing: %s" % (req_attribute)
retVal += 1
return retVal
def test_dimensions(nc, requirements):
""" Tests for required dimensions
"""
retVal = 0
for req_dimension in requirements['dimensions']:
if req_dimension not in nc.dimensions:
print "Dimension Missing: %s" % (req_dimension)
retVal += 1
return retVal
def test_required_variables(nc, requirements):
""" Tests for required variables
"""
retVal = 0
for req_variable in requirements['required_variables']:
variables = nc.variables
if req_variable not in variables:
print "Missing required variable %s" % req_variable
retVal += 1
return retVal
def test_variable_attributes(nc, requirements):
""" Tests for required variable attributes
"""
retVal = 0
for variable_name in nc.variables:
# Skip QC variables
if variable_name[-2:] == "qc":
continue
# Ignore configured variables
if variable_name in requirements['ignore_variable_check']:
continue
variable = nc.variables[variable_name]
# Skip scalar and descriptive variables
if variable.size < 2:
continue
var_attrs = nc.variables[variable_name].ncattrs()
for req_var_attr in requirements['variable_attributes']:
if req_var_attr not in var_attrs:
print("Variable attribute %s "
"missing in %s variable" % (req_var_attr, variable_name))
retVal += 1
return retVal
def test_qc_variables(nc, requirements):
""" Tests that all variables have a corresponding qc variable
"""
retVal = 0
for variable_name in nc.variables:
# Skip QC variables
if variable_name[-2:] == "qc":
continue
# Ignore configured variables
if variable_name in requirements['ignore_variable_check']:
continue
variable = nc.variables[variable_name]
if variable.size < 2:
continue
qc_name = "%s_qc" % variable_name
if qc_name not in nc.variables:
print("QC variable missing for %s" % variable_name)
retVal += 1
return retVal
def test_platform_attributes(nc, requirements):
""" Tests for required platform attributes
"""
retVal = 0
platform_attrs = nc.variables['platform'].ncattrs()
for req_platform_attr in requirements['platform_attributes']:
if req_platform_attr not in platform_attrs:
print "Platform attribute %s missing" % req_platform_attr
retVal += 1
return retVal
def test_ctd_attributes(nc, requirements):
""" Tests for required ctd attributes
"""
retVal = 0
ctd_attrs = nc.variables['instrument_ctd'].ncattrs()
for req_ctd_attr in requirements['ctd_attributes']:
if req_ctd_attr not in ctd_attrs:
print "CTD attribute %s missing" % req_ctd_attr
retVal += 1
return retVal
# All compliance checks, run in order by main(); each takes
# (nc, requirements) and returns the number of problems it found.
test_functions = [
    test_global_attributes,
    test_dimensions,
    test_required_variables,
    test_variable_attributes,
    test_qc_variables,
    test_platform_attributes,
    test_ctd_attributes
]
def main():
    """Run every compliance check against the given glider NetCDF file.

    Parses the command line, loads the JSON requirements spec and the
    NetCDF file, runs all test_functions and returns the total number of
    errors found (0 means the file passes, and "PASS" is printed).
    """
    parser = argparse.ArgumentParser(
        description='Verifies that a glider NetCDF file from a provider '
                    'contains all the required global attributes, dimensions,'
                    'scalar variables and dimensioned variables.'
    )
    default_standard_path = (
        path.join(
            path.dirname(__file__),
            '..',
            'etc',
            'glider_DAC-2.0.json'
        )
    )
    parser.add_argument(
        '-s', '--path_to_standard',
        default=default_standard_path
    )
    parser.add_argument(
        'path_to_glider_netcdf',
        help='Path to Glider NetCDF file.'
    )
    args = parser.parse_args()
    # Load requirements spec (json.load reads straight from the handle).
    with open(args.path_to_standard, 'r') as f:
        requirements = json.load(f)
    # Load NetCDF file
    nc = Dataset(
        args.path_to_glider_netcdf, 'r',
        format='NETCDF4_CLASSIC'
    )
    try:
        # Accumulate the error count across all checks.
        retVal = 0
        for test_fun in test_functions:
            retVal += test_fun(nc, requirements)
    finally:
        # The original leaked the dataset handle; always close it, even if
        # a check raises.
        nc.close()
    if retVal == 0:
        print("PASS")
    return retVal
# Exit with the number of errors found so shell scripts can test compliance.
if __name__ == '__main__':
    sys.exit(main())
|
USF-COT/glider_netcdf_writer
|
scripts/scripts-bin/check_glider_netcdf.py
|
Python
|
mit
| 5,287
|
[
"NetCDF"
] |
99d2f5778bda64b5f20450a4a89ccfed9d3729ea90b839f210d5ee4bd6ab62b8
|
# -*- coding: utf-8 -*-
# TAMkin is a post-processing toolkit for normal mode analysis, thermochemistry
# and reaction kinetics.
# Copyright (C) 2008-2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, An Ghysels
# <An.Ghysels@UGent.be> and Matthias Vandichel <Matthias.Vandichel@UGent.be>
# Center for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all
# rights reserved unless otherwise stated.
#
# This file is part of TAMkin.
#
# TAMkin is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "TAMkin: A Versatile Package for Vibrational Analysis and Chemical Kinetics",
# An Ghysels, Toon Verstraelen, Karen Hemelsoet, Michel Waroquier and Veronique
# Van Speybroeck, Journal of Chemical Information and Modeling, 2010, 50,
# 1736-1750W
# http://dx.doi.org/10.1021/ci100099g
#
# TAMkin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from __future__ import print_function, division
import numpy as np
import os
import pkg_resources
import unittest
from molmod.periodic import periodic
from molmod.units import angstrom, amu, calorie, avogadro, electronvolt
from molmod.constants import lightspeed
from molmod.test.common import tmpdir
from tamkin import *
__all__ = ["IOTestCase"]
class IOTestCase(unittest.TestCase):
    """Regression tests for TAMkin's file loaders and checkpoint I/O.

    Each test loads a packaged data file through one of the ``load_*``
    helpers and checks known values (energies, masses, coordinates,
    Hessian elements, ...) against the numbers stored in that file.
    """

    def test_load_fixed_g03com(self):
        # 48 fixed atoms expected, with 0-based indices 114..161.
        fixed_atoms = load_fixed_g03com(
            pkg_resources.resource_filename("tamkin", "data/test/mat/Zp_p_prod.18aug.com"))
        self.assertEqual(len(fixed_atoms), 48)
        self.assertEqual(fixed_atoms, list(range(114,114+48)))

    def test_load_molecule_g03fchk(self):
        # Checks the Hessian shape and the three ways the energy can be
        # obtained: from the fchk itself, via the energy= override, and
        # from a second fchk file.
        atoms = 181
        molecule = load_molecule_g03fchk(
            pkg_resources.resource_filename("tamkin", "data/test/mat/Zp_p_react.28aug.fchk"))
        self.assertEqual(molecule.hessian.shape,(atoms*3,atoms*3))
        self.assertAlmostEqual(molecule.energy, -3053.805846445570, 7)
        molecule = load_molecule_g03fchk(
            pkg_resources.resource_filename("tamkin", "data/test/mat/Zp_p_react.28aug.fchk"),
            energy=-123.0)
        self.assertAlmostEqual(molecule.energy, -123.0, 7)
        molecule = load_molecule_g03fchk(
            pkg_resources.resource_filename("tamkin", "data/test/mat/Zp_p_react.28aug.fchk"),
            pkg_resources.resource_filename("tamkin", "data/test/mat/Zp_p_react.14mei.fchk"))
        self.assertAlmostEqual(molecule.energy, -18613.135744186180, 7)

    def test_load_molecule_g98fchk(self):
        # Gaussian98 variant: masses are read from the periodic data and
        # the energy can be overridden just like the g03 loader.
        atoms = 6
        molecule = load_molecule_g98fchk(
            pkg_resources.resource_filename("tamkin", "data/test/g98/freqs.fchk"))
        self.assertEqual(molecule.hessian.shape,(atoms*3,atoms*3))
        self.assertAlmostEqual(molecule.masses[0]/amu, 12.011)
        self.assertAlmostEqual(molecule.masses[2]/amu, 1.0079)
        self.assertAlmostEqual(molecule.energy, -78.58745828877478, 7)
        molecule = load_molecule_g98fchk(
            pkg_resources.resource_filename("tamkin", "data/test/g98/freqs.fchk"),
            energy=-123.0)
        self.assertAlmostEqual(molecule.energy, -123.0, 7)

    def test_load_molecule_g03fchkvdw(self):
        # Three-file variant: frequency fchk, SCF fchk and a b3lyp-d output.
        atoms = 179
        molecule = load_molecule_g03fchk(
            pkg_resources.resource_filename("tamkin", "data/test/matvdw/R.fchk"),
            pkg_resources.resource_filename("tamkin", "data/test/matvdw/R_SCF.fchk"),
            pkg_resources.resource_filename("tamkin", "data/test/matvdw/R_b3lyp-d.out"))
        self.assertEqual(molecule.hessian.shape,(atoms*3,atoms*3))
        self.assertAlmostEqual(molecule.energy,-18612.352569964281 , 7)

    def test_load_fixed_fchk(self):
        # The fchk loader exposes the fixed-atom indices on the molecule.
        molecule = load_molecule_g03fchk(
            pkg_resources.resource_filename("tamkin", "data/test/ethane/gaussian.fchk"))
        assert (molecule.fixed == [0, 6, 7]).all()
    def test_load_molecule_cp2k(self):
        # CP2K loader: single-point output for the energy, frequency output
        # for the Hessian; Hessian entries are mass-weighted in the file.
        molecule = load_molecule_cp2k(
            pkg_resources.resource_filename("tamkin", "data/test/cp2k/pentane/sp.out"),
            pkg_resources.resource_filename("tamkin", "data/test/cp2k/pentane/freq.out"))
        self.assertAlmostEqual(molecule.energy, 0.012255059530862)
        self.assertEqual(molecule.multiplicity, 1)
        self.assertEqual(molecule.numbers[0], 6)
        self.assertEqual(molecule.numbers[4], 1)
        self.assertAlmostEqual(molecule.masses[0], periodic[6].mass, 5)
        self.assertAlmostEqual(molecule.masses[4], periodic[1].mass, 0)
        self.assertAlmostEqual(molecule.coordinates[5,1]/angstrom, 13.928520)
        self.assertAlmostEqual(molecule.gradient[0,2], 0.0000000038, 9)
        self.assertAlmostEqual(molecule.gradient[11,0], 0.0000000177, 9)
        self.assertAlmostEqual(molecule.hessian[0,0], 49.629809*1e-6*molecule.masses[0], 6)
        self.assertAlmostEqual(molecule.hessian[-1,-1], 287.884198*1e-6*molecule.masses[-1], 6)
        self.assertAlmostEqual(molecule.unit_cell.matrix[0,0]/angstrom, 30.000,3)
        self.assertAlmostEqual(molecule.unit_cell.matrix[1,2]/angstrom, 0.000,3)

    def test_load_molecule_cp2k_23(self):
        # Same checks against output produced by CP2K 2.3.
        molecule = load_molecule_cp2k(
            pkg_resources.resource_filename("tamkin", "data/test/cp2k/john/scf.out"),
            pkg_resources.resource_filename("tamkin", "data/test/cp2k/john/quickie.out"))
        self.assertAlmostEqual(molecule.energy, -141.189006820072791)
        self.assertEqual(molecule.multiplicity, 1)
        self.assertEqual(molecule.numbers[0], 14)
        self.assertEqual(molecule.numbers[4], 8)
        self.assertAlmostEqual(molecule.masses[0], periodic[14].mass)
        self.assertAlmostEqual(molecule.masses[4], periodic[8].mass)
        self.assertAlmostEqual(molecule.coordinates[5,1]/angstrom, 12.150867)
        self.assertAlmostEqual(molecule.gradient[0,2], -0.00008196, 9)
        self.assertAlmostEqual(molecule.gradient[11,0], -0.00041872, 9)
        self.assertAlmostEqual(molecule.hessian[0,0], 8.297378*1e-6*molecule.masses[0], 6)
        self.assertAlmostEqual(molecule.hessian[-1,-1], 71.133554*1e-6*molecule.masses[-1], 6)
        self.assertAlmostEqual(molecule.unit_cell.matrix[0,0]/angstrom, 20.000,3)
        self.assertAlmostEqual(molecule.unit_cell.matrix[1,2]/angstrom, 0.000,3)

    def test_load_molecule_cp2k_dan_phva(self):
        # Partial Hessian case: rows/columns of non-free atoms are zero and
        # off-diagonal entries are symmetrized averages of the file values.
        molecule = load_molecule_cp2k(
            pkg_resources.resource_filename("tamkin", "data/test/cp2k/dan/cp2k.out"),
            pkg_resources.resource_filename("tamkin", "data/test/cp2k/dan/freq.out"))
        self.assertAlmostEqual(molecule.energy, -290.409097595743333)
        self.assertEqual(molecule.multiplicity, 1)
        self.assertEqual(molecule.numbers[0], 6)
        self.assertEqual(molecule.numbers[4], 6)
        self.assertEqual(molecule.numbers[-1], 1)
        self.assertAlmostEqual(molecule.masses[0], periodic[6].mass, 0)
        self.assertAlmostEqual(molecule.masses[-1], periodic[1].mass, 0)
        self.assertAlmostEqual(molecule.coordinates[5,1], -2.458266*angstrom)
        self.assertAlmostEqual(molecule.gradient[-1,0], 0.00000044, 9)
        self.assertAlmostEqual(molecule.gradient[-3,2], 0.00000080, 9)
        self.assertAlmostEqual(molecule.hessian[0,0], 0.0, 6)
        self.assertAlmostEqual(molecule.hessian[0,-1], 0.0, 6)
        self.assertAlmostEqual(molecule.hessian[-1,0], 0.0, 6)
        self.assertAlmostEqual(molecule.hessian[-18,-1],
            0.5*(-0.418336-0.418196)*1e-6*(molecule.masses[-1]*molecule.masses[-18])**0.5, 6)
        self.assertAlmostEqual(molecule.hessian[-18,-18], 19.137757*1e-6*molecule.masses[-18], 6)
        self.assertAlmostEqual(molecule.hessian[-1,-1], 51.483068*1e-6*molecule.masses[-1], 6)
        self.assertAlmostEqual(molecule.unit_cell.matrix[0,0], 10.657*angstrom, 3)
        self.assertAlmostEqual(molecule.unit_cell.matrix[1,0], -6.153*angstrom, 3)

    def test_load_fixed_cp2k(self):
        # The first 46 atoms (52 minus the 6 free ones) are fixed.
        fixed = load_fixed_cp2k(
            pkg_resources.resource_filename("tamkin", "data/test/cp2k/dan/freq.out"))
        np.testing.assert_equal(fixed, np.arange(52 - 6))

    def test_load_molecule_cpmd(self):
        # CPMD loader: output, geometry and MOLVIB files; gradients here
        # are zero and the Hessian is read directly from MOLVIB.
        molecule = load_molecule_cpmd(
            pkg_resources.resource_filename("tamkin", "data/test/cpmd/damp.out"),
            pkg_resources.resource_filename("tamkin", "data/test/cpmd/GEOMETRY.xyz"),
            pkg_resources.resource_filename("tamkin", "data/test/cpmd/MOLVIB"))
        self.assertAlmostEqual(molecule.energy, -17.14142079)
        self.assertEqual(molecule.multiplicity, 1)
        self.assertEqual(molecule.numbers[0], 8)
        self.assertEqual(molecule.numbers[2], 1)
        self.assertAlmostEqual(molecule.masses[0]/amu, 15.999400)
        self.assertAlmostEqual(molecule.masses[2]/amu, 1.007970)
        self.assertAlmostEqual(molecule.coordinates[2,0]/angstrom, .907207019799)
        self.assertAlmostEqual(molecule.gradient[0,2], 0.0)
        self.assertAlmostEqual(molecule.gradient[2,0], 0.0)
        self.assertAlmostEqual(molecule.hessian[0,0], 0.530679165332463953, 6)
        self.assertAlmostEqual(molecule.hessian[-1,-1], 0.921045226686428159E-01, 6)
    def test_load_molecule_charmm(self):
        # CHARMM loader: coordinates from the .cor file, energy/gradient/
        # Hessian from the full Hessian dump (kcal/mol based units).
        molecule = load_molecule_charmm(
            pkg_resources.resource_filename("tamkin", "data/test/an/ethanol.cor"),
            pkg_resources.resource_filename("tamkin", "data/test/an/ethanol.hess.full"))
        self.assertAlmostEqual(molecule.energy/(1000*calorie/avogadro), -2.1303308955)
        self.assertEqual(molecule.multiplicity, 1)
        self.assertEqual(molecule.numbers[0], 6)
        self.assertEqual(molecule.numbers[4], 1)
        self.assertAlmostEqual(molecule.masses[0]/amu, 12.01100)
        self.assertAlmostEqual(molecule.masses[4]/amu, 1.00800)
        self.assertAlmostEqual(molecule.coordinates[5,1]/angstrom, 1.3582528196)
        self.assertAlmostEqual(molecule.gradient[0,2]/(1000*calorie/avogadro/angstrom), -0.0000000007, 9)
        self.assertAlmostEqual(molecule.gradient[8,0]/(1000*calorie/avogadro/angstrom), -0.0000001462, 9)
        self.assertAlmostEqual(molecule.hessian[0,0]/(1000*calorie/avogadro /angstrom**2), 1409.7091337384, 6)
        self.assertAlmostEqual(molecule.hessian[-1,-1]/(1000*calorie/avogadro /angstrom**2), 474.7950312957, 6)

    def test_load_molecule_qchem(self):
        # Q-Chem loader with a separate Hessian file.
        molecule = load_molecule_qchem(
            pkg_resources.resource_filename("tamkin", "data/test/qchem/h2o2.hf.sto-3g.freq.out"),
            hessfile=pkg_resources.resource_filename("tamkin", "data/test/qchem/hessian.dat"))
        self.assertAlmostEqual(molecule.energy, -148.7649966058)
        self.assertEqual(molecule.multiplicity, 1)
        self.assertEqual(molecule.numbers[0], 1)
        self.assertEqual(molecule.numbers[3], 8)
        self.assertAlmostEqual(molecule.masses[0]/amu, 1.00783, 5)
        self.assertAlmostEqual(molecule.masses[3]/amu, 15.99491, 5)
        self.assertAlmostEqual(molecule.coordinates[2,1]/angstrom, -0.688720)
        self.assertAlmostEqual(molecule.gradient[0,2]/(1000*calorie/avogadro/angstrom), -0.00, 5)
        self.assertAlmostEqual(molecule.gradient[3,0]/(1000*calorie/avogadro/angstrom), -0.00, 5)
        self.assertAlmostEqual(molecule.hessian[0,0]/(1000*calorie/avogadro/angstrom**2), 364.769480916757800060, 6)
        self.assertAlmostEqual(molecule.hessian[-1,-1]/(1000*calorie/avogadro/angstrom**2), 338.870127396983150447, 6)
    def test_load_molecule_vasp_53(self):
        # VASP loader: geometry/cell from CONTCAR, masses, Hessian, energy
        # and gradient from OUTCAR; off-diagonal Hessian entries are
        # symmetrized averages of the two file values.
        molecule = load_molecule_vasp(
            pkg_resources.resource_filename("tamkin", "data/test/lucas/vasp_5_3_5_complex/CONTCAR_opt"),
            pkg_resources.resource_filename("tamkin", "data/test/lucas/vasp_5_3_5_complex/OUTCAR_freq"))
        # contcar
        assert molecule.numbers[0] == 6
        assert (molecule.numbers[1:] == 1).all()
        assert molecule.size == 5
        assert molecule.unit_cell.matrix[0,0] == 15.0*angstrom
        assert molecule.unit_cell.matrix[1,2] == 0.0
        self.assertAlmostEqual(molecule.coordinates[0,0]/angstrom, 7.15840, 3)
        self.assertAlmostEqual(molecule.coordinates[1,2]/angstrom, 8.44640, 2) #?
        self.assertAlmostEqual(molecule.coordinates[-1,-1]/angstrom, 6.95131, 2) #?
        # outcar_freq
        assert molecule.masses[0] == 12.011*amu
        assert (molecule.masses[1:] == 1.000*amu).all()
        hunit = electronvolt/angstrom**2
        assert molecule.hessian[0,0] == 53.624756*hunit
        assert molecule.hessian[-1,-1] == 31.299419*hunit
        self.assertAlmostEqual(molecule.hessian[2,5], 0.5*(-7.551817 + 3.319877)*hunit)
        assert molecule.energy == -24.11901936*electronvolt
        gunit = electronvolt/angstrom
        assert molecule.gradient[0, 0] == 0.096977*gunit
        assert molecule.gradient[2, 1] == 0.100275*gunit
        assert molecule.gradient[-1, -1] == -0.212810*gunit

    def test_load_molecule_vasp_5_3_5_gamma(self):
        # Same system computed with the gamma-point-only VASP 5.3.5 build.
        molecule = load_molecule_vasp(
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_3_5_gamma/CONTCAR_opt"),
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_3_5_gamma/OUTCAR_freq"))
        # contcar
        assert molecule.numbers[0] == 6
        assert (molecule.numbers[1:] == 1).all()
        assert molecule.size == 5
        assert molecule.unit_cell.matrix[0,0] == 15.0*angstrom
        assert molecule.unit_cell.matrix[1,2] == 0.0
        self.assertAlmostEqual(molecule.coordinates[0,0]/angstrom, 7.15782, 3)
        self.assertAlmostEqual(molecule.coordinates[1,2]/angstrom, 8.44278, 1) #??
        self.assertAlmostEqual(molecule.coordinates[-1,-1]/angstrom, 6.95393, 2) #?
        # outcar_freq
        assert molecule.masses[0] == 12.011*amu
        assert (molecule.masses[1:] == 1.000*amu).all()
        hunit = electronvolt/angstrom**2
        assert molecule.hessian[0,0] == 47.756815*hunit
        assert molecule.hessian[-1,-1] == 31.561376*hunit
        self.assertAlmostEqual(molecule.hessian[2,5], 0.5*(-2.265871 + -3.645039)*hunit)
        assert molecule.energy == -24.12364199*electronvolt
        gunit = electronvolt/angstrom
        assert molecule.gradient[0, 0] == -0.005459*gunit
        assert molecule.gradient[2, 1] == -0.008215*gunit
        assert molecule.gradient[-1, -1] == 0.003424*gunit

    def test_load_molecule_vasp_5_3_5_gamma_part(self):
        # Partial-Hessian OUTCAR: rows/columns of frozen atoms are zero.
        molecule = load_molecule_vasp(
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_3_5_gamma/CONTCAR_opt"),
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_3_5_gamma/OUTCAR_freq_part"))
        # outcar_freq
        hunit = electronvolt/angstrom**2
        assert molecule.hessian[0,0] == 0.0
        assert molecule.hessian[2,5] == 0.0
        assert molecule.hessian[-1,-1] == 31.561374*hunit
        self.assertAlmostEqual(molecule.hessian[6,9], 0.5*(-2.601094 + -2.794160)*hunit)

    def test_load_molecule_vasp_5_2_11_complex(self):
        # Same system with VASP 5.2.11 output formatting.
        molecule = load_molecule_vasp(
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_2_11_complex/CONTCAR_opt"),
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_2_11_complex/OUTCAR_freq"))
        # outcar_freq
        assert molecule.masses[0] == 12.011*amu
        assert (molecule.masses[1:] == 1.000*amu).all()
        hunit = electronvolt/angstrom**2
        assert molecule.hessian[0,0] == 47.762604*hunit
        assert molecule.hessian[-1,-1] == 31.565279*hunit
        self.assertAlmostEqual(molecule.hessian[2,5], 0.5*(-3.648356 + -2.264335)*hunit)
        assert molecule.energy == -24.123642*electronvolt
        gunit = electronvolt/angstrom
        assert molecule.gradient[0, 0] == -0.005432*gunit
        assert molecule.gradient[2, 1] == -0.008204*gunit
        assert molecule.gradient[-1, -1] == 0.003425*gunit

    def test_load_molecule_vasp_5_2_11_complex_part(self):
        # Partial Hessian with VASP 5.2.11.
        molecule = load_molecule_vasp(
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_2_11_complex/CONTCAR_opt"),
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_2_11_complex/OUTCAR_freq_part"))
        # outcar_freq
        hunit = electronvolt/angstrom**2
        assert molecule.hessian[0,0] == 0.0
        assert molecule.hessian[2,5] == 0.0
        assert molecule.hessian[-1,-1] == 31.565272*hunit
        self.assertAlmostEqual(molecule.hessian[6,9], 0.5*(-2.600373 + -2.836454)*hunit)
        assert molecule.energy == -24.123642*electronvolt

    def test_load_molecule_vasp_5_3_3_complex(self):
        # Same system with VASP 5.3.3 output formatting.
        molecule = load_molecule_vasp(
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_3_3_complex/CONTCAR_opt"),
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_3_3_complex/OUTCAR_freq"))
        # outcar_freq
        assert molecule.masses[0] == 12.011*amu
        assert (molecule.masses[1:] == 1.000*amu).all()
        hunit = electronvolt/angstrom**2
        assert molecule.hessian[0,0] == 47.757096*hunit
        assert molecule.hessian[-1,-1] == 31.561341*hunit
        self.assertAlmostEqual(molecule.hessian[2,5], 0.5*(-3.645047 + -2.265767)*hunit)
        assert molecule.energy == -24.12364179*electronvolt
        gunit = electronvolt/angstrom
        assert molecule.gradient[0, 0] == -0.005431*gunit
        assert molecule.gradient[2, 1] == -0.008279*gunit
        assert molecule.gradient[-1, -1] == 0.003335*gunit

    def test_load_molecule_vasp_5_3_3_complex_part(self):
        # Partial Hessian with VASP 5.3.3.
        molecule = load_molecule_vasp(
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_3_3_complex/CONTCAR_opt"),
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_3_3_complex/OUTCAR_freq_part"))
        # outcar_freq
        hunit = electronvolt/angstrom**2
        assert molecule.hessian[0,0] == 0.0
        assert molecule.hessian[2,5] == 0.0
        assert molecule.hessian[-1,-1] == 31.561353*hunit
        self.assertAlmostEqual(molecule.hessian[6,9], 0.5*(-2.601060 + -2.794022)*hunit)

    def test_load_molecule_vasp_5_3_5_gamma_part_energy(self):
        # An explicit energy= argument overrides the OUTCAR energy.
        molecule = load_molecule_vasp(
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_3_5_gamma/CONTCAR_opt"),
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_3_5_gamma/OUTCAR_freq_part"),
            energy=1.476)
        assert molecule.energy == 1.476

    def test_load_molecule_vasp_5_3_5_gamma_part_outcar_energy(self):
        # A third OUTCAR can supply the energy instead of the frequency run.
        molecule = load_molecule_vasp(
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_3_5_gamma/CONTCAR_opt"),
            pkg_resources.resource_filename("tamkin", "data/test/julianna/vasp_5_3_5_gamma/OUTCAR_freq_part"),
            pkg_resources.resource_filename("tamkin", "data/test/lucas/vasp_5_3_5_complex/OUTCAR_freq"))
        assert molecule.energy == -24.11901936*electronvolt
    def test_checkpoint(self):
        # Round-trip an NMA object through the checkpoint file format and
        # verify that all arrays and scalars survive unchanged.
        molecule = load_molecule_cp2k(
            pkg_resources.resource_filename("tamkin", "data/test/cp2k/pentane/sp.out"),
            pkg_resources.resource_filename("tamkin", "data/test/cp2k/pentane/freq.out"))
        nma1 = NMA(molecule)
        with tmpdir(__name__, 'test_checkpoint') as dn:
            fn_out = os.path.join(dn, 'test.chk')
            nma1.write_to_file(fn_out)
            nma2 = NMA.read_from_file(fn_out)
        self.assertEqual(nma1.freqs.shape, nma2.freqs.shape)
        self.assertEqual(nma1.modes.shape, nma2.modes.shape)
        self.assertEqual(nma1.masses.shape, nma2.masses.shape)
        self.assertEqual(nma1.numbers.shape, nma2.numbers.shape)
        self.assertEqual(nma1.coordinates.shape, nma2.coordinates.shape)
        self.assertEqual(nma1.inertia_tensor.shape, nma2.inertia_tensor.shape)
        # Relative round-trip error must stay at machine precision.
        assert abs(nma1.freqs - nma2.freqs).max()/abs(nma1.freqs).max() < 1e-15
        assert abs(nma1.modes - nma2.modes).max()/abs(nma1.modes).max() < 1e-15
        assert abs(nma1.masses - nma2.masses).max()/abs(nma1.masses).max() < 1e-15
        assert abs(nma1.coordinates - nma2.coordinates).max()/abs(nma1.coordinates).max() < 1e-15
        assert abs(nma1.inertia_tensor - nma2.inertia_tensor).max()/abs(nma1.inertia_tensor).max() < 1e-15
        assert (nma1.numbers==nma2.numbers).all()
        self.assertAlmostEqual(nma1.mass, nma2.mass)
        self.assertAlmostEqual(nma1.energy, nma2.energy)
        self.assertEqual(nma1.multiplicity, nma2.multiplicity)
        self.assertEqual(nma1.symmetry_number, nma2.symmetry_number)

    def test_load_indices(self):
        # Flat list by default; groups=True returns a list of groups.
        blocks = load_indices(pkg_resources.resource_filename("tamkin", "data/test/an/fixed.07.txt"), groups=True)
        self.assertEqual(blocks, [[3,2,6]])
        blocks = load_indices(pkg_resources.resource_filename("tamkin", "data/test/an/fixed.07.txt"))
        self.assertEqual(blocks, [3,2,6])
        blocks = load_indices(pkg_resources.resource_filename("tamkin", "data/test/an/fixed.08.txt"))
        self.assertEqual(blocks, [5,4,8])

    def test_load_indices_ranges(self):
        # Range syntax in the file, with the shift= argument controlling
        # the 1-based -> 0-based conversion.
        blocks = load_indices(pkg_resources.resource_filename("tamkin", "data/test/an/fixed_ranges.txt"), groups=True)
        self.assertEqual(blocks, [[0, 2, 3, 4], [9, 10, 11, 12, 13, 19], [21]])
        blocks = load_indices(pkg_resources.resource_filename("tamkin", "data/test/an/fixed_ranges.txt"), shift=0)
        self.assertEqual(blocks, [1, 3, 4, 5, 10, 11, 12, 13, 14, 20, 22])
        blocks = load_indices(pkg_resources.resource_filename("tamkin", "data/test/an/fixed_ranges_shift.txt"), groups=True)
        self.assertEqual(blocks, [[1, 3, 4, 5], [10, 11, 12, 13, 14, 20], [22]])
        blocks = load_indices(pkg_resources.resource_filename("tamkin", "data/test/an/fixed_ranges_shift.txt"), shift=0)
        self.assertEqual(blocks, [1, 3, 4, 5, 10, 11, 12, 13, 14, 20, 22])
    def test_dump_modes_xyz(self):
        # Dump normal mode 6 of ethanol as an animated XYZ trajectory and
        # spot-check the first frame's header and first atom record.
        molecule = load_molecule_charmm(
            pkg_resources.resource_filename("tamkin", "data/test/an/ethanol.cor"),
            pkg_resources.resource_filename("tamkin", "data/test/an/ethanol.hess.full"))
        nma = NMA(molecule)
        with tmpdir(__name__, 'test_dump_modes_xyz') as dn:
            prefix = os.path.join(dn, 'mode')
            dump_modes_xyz(nma, 6, prefix=prefix, amplitude=50.0)
            with open(prefix + ".6.xyz") as f:
                # 1st line: atom count (ethanol has 9 atoms)
                line = f.readline().strip()
                self.assertEqual(line,"9")
                # 2nd line: frame title
                line = f.readline().strip()
                self.assertEqual(line,"frame 0")
                # 3rd line: first atom record, "symbol x y z"
                line = f.readline()
                words = line.split()
                self.assertEqual(len(words),4)
                self.assertEqual(words[0],"C")
                self.assertEqual(words[2],"0.081608346")
                # Skip the remaining 8 atom lines of frame 0; the 9th read
                # lands on the atom-count line of the next frame.
                for i in range(9):
                    line = f.readline().strip()
                self.assertEqual(line,"9")
def test_dump_modes_gaussian(self):
molecule = load_molecule_charmm(
pkg_resources.resource_filename("tamkin", "data/test/an/ethanol.cor"),
pkg_resources.resource_filename("tamkin", "data/test/an/ethanol.hess.full"))
nma = NMA(molecule)
with tmpdir(__name__, 'test_dump_modes_gaussian') as dn:
# blind test, no double checking of the output file
fn_log = os.path.join(dn, 'modes_gaussian.log')
dump_modes_gaussian(fn_log, nma)
def test_load_dump_indices1(self):
subs = range(10)
with tmpdir(__name__, 'test_load_dump_indices1') as dn:
dump_indices(os.path.join(dn, "subs-atoms.1.txt"), subs, shift=0)
dump_indices(os.path.join(dn, "subs-atoms.2.txt"), subs, shift=1)
dump_indices(os.path.join(dn, "subs-atoms.3.txt"), subs)
subs1 = load_indices(os.path.join(dn, "subs-atoms.1.txt"), shift=0)
subs2 = load_indices(os.path.join(dn, "subs-atoms.2.txt"), shift=-1)
subs22 = load_indices(os.path.join(dn, "subs-atoms.2.txt")) # should not matter
subs3 = load_indices(os.path.join(dn, "subs-atoms.3.txt"))
blocks = [range(10), range(10,20)]
dump_indices(os.path.join(dn, "blocks.1.txt"), blocks, shift=0)
dump_indices(os.path.join(dn, "blocks.2.txt"), blocks, shift=1)
dump_indices(os.path.join(dn, "blocks.3.txt"), blocks)
blocks1 = load_indices(os.path.join(dn, "blocks.1.txt"), shift=0, groups=True)
blocks2 = load_indices(os.path.join(dn, "blocks.2.txt"), shift=-1, groups=True)
blocks22 = load_indices(os.path.join(dn, "blocks.2.txt"), groups=True) # should not matter
blocks3 = load_indices(os.path.join(dn, "blocks.3.txt"), groups=True)
self.assertEqual(len(subs),len(subs1))
for (i,j) in zip(subs,subs1):
self.assertEqual(i,j)
self.assertEqual(len(subs),len(subs2))
for i,j in zip(subs,subs2):
self.assertEqual(i,j)
self.assertEqual(len(subs),len(subs22))
for i,j in zip(subs,subs22):
self.assertEqual(i,j)
self.assertEqual(len(subs),len(subs3))
for i,j in zip(subs,subs3):
self.assertEqual(i,j)
self.assertEqual(len(blocks),len(blocks1))
for bl,bl1 in zip(blocks,blocks1):
for i,j in zip(bl,bl1):
self.assertEqual(i,j)
self.assertEqual(len(blocks),len(blocks2))
for bl,bl1 in zip(blocks,blocks2):
for i,j in zip(bl,bl1):
self.assertEqual(i,j)
self.assertEqual(len(blocks),len(blocks22))
for bl,bl1 in zip(blocks,blocks22):
for i,j in zip(bl,bl1):
self.assertEqual(i,j)
self.assertEqual(len(blocks),len(blocks3))
for bl,bl1 in zip(blocks,blocks3):
for i,j in zip(bl,bl1):
self.assertEqual(i,j)
    def test_load_dump_indices2(self):
        # Fuzz test: randomly generated index groups must survive a
        # dump_indices/load_indices round trip for arbitrary shifts, in both
        # compact and verbose file formats.
        randint = np.random.randint
        for counter in range(20):
            for compact in True, False:
                # shift applied on dump is undone below by loading with -shift
                shift = randint(-5,5)
                # build a random list of non-empty, duplicate-free index groups
                indices = []
                for i in range(randint(10,20)):
                    l = list(set(randint(0,10,randint(20))))
                    if len(l) > 0:
                        indices.append(l)
                # flattened view of the same indices, in group order
                indices_flat = sum(indices, [])
                with tmpdir(__name__, '{}_{}'.format('test_load_dump_indices2', counter)) as dn:
                    dump_indices(os.path.join(dn, "indices_blocks.txt"), indices, compact=compact, shift=shift)
                    dump_indices(os.path.join(dn, "indices_flat.txt"), indices_flat, compact=compact, shift=shift)
                    # grouped file: round trip preserves both grouping and order
                    check = load_indices(os.path.join(dn, "indices_blocks.txt"), shift=-shift, groups=True)
                    self.assertEqual(indices, check)
                    check = load_indices(os.path.join(dn, "indices_blocks.txt"), shift=-shift)
                    self.assertEqual(indices_flat, check)
                    # flat file loaded with groups=True yields a single group
                    check = load_indices(os.path.join(dn, "indices_flat.txt"), shift=-shift, groups=True)
                    self.assertEqual([indices_flat], check)
                    check = load_indices(os.path.join(dn, "indices_flat.txt"), shift=-shift)
                    self.assertEqual(indices_flat, check)
def test_punch(self):
mol0 = load_molecule_g03fchk(
pkg_resources.resource_filename("tamkin", "data/test/punch/gaussian.fchk"))
mol1 = load_molecule_g03fchk(
pkg_resources.resource_filename("tamkin", "data/test/punch/gaussian.fchk"),
fn_punch=pkg_resources.resource_filename("tamkin", "data/test/punch/fort.7"))
assert abs(mol0.gradient - mol1.gradient).max() < 1e-8
assert abs(mol0.hessian - mol1.hessian).max() < 1e-8
def test_dftd3(self):
assert load_dftd3(
pkg_resources.resource_filename("tamkin", "data/test/dftd3/dftd3.out")) == -0.00330057
def test_dftd_orca(self):
assert load_dftd_orca(
pkg_resources.resource_filename("tamkin", "data/test/matvdw/R_b3lyp-d.out")) == -0.404083275
|
molmod/tamkin
|
tamkin/test/test_io.py
|
Python
|
gpl-3.0
| 28,786
|
[
"Avogadro",
"CP2K",
"CPMD",
"Gaussian"
] |
f64b9ae5fc4f0c6b51491881d4e5466f2a9df9718a41353f7d9e4b77a2eab474
|
"""
Module for rendering bonds
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import time
import logging
import functools
import vtk
import numpy as np
from . import baseRenderer
from . import povrayWriters
from .. import utils
from .. import _rendering
from ...filtering import bonds
from six.moves import range
def _bondGlyphMethod(bondGlyph, bondGlyphSource, *args, **kwargs):
"""Bond glyph method for programmable glyph filter."""
# get vector and position
pointID = bondGlyph.GetPointId()
vector = bondGlyph.GetPointData().GetVectors().GetTuple3(pointID)
pos = bondGlyph.GetPoint()
# set ends for line
bondGlyphSource.SetPoint1(pos)
bondGlyphSource.SetPoint2(pos[0] + vector[0], pos[1] + vector[1], pos[2] + vector[2])
class BondRenderer(baseRenderer.BaseRenderer):
    """
    Render a set of bonds.

    Builds a VTK pipeline that sweeps a tube along each bond vector via a
    programmable glyph filter, and exposes a POV-Ray export of the same data.
    """
    def __init__(self):
        super(BondRenderer, self).__init__()
        self._logger = logging.getLogger(__name__ + ".BondRenderer")

    def render(self, bondCoords, bondVectors, bondScalars, numSpecies, colouringOptions, bondsOptions, lut):
        """
        Render the given bonds.

        bondCoords/bondVectors/bondScalars are NumpyVTKData-style wrappers
        (they provide getVTK()); lut is the lookup table used for colouring.
        Stores the resulting actor and the input data on self for later
        POV-Ray export.
        """
        self._logger.debug("Rendering bonds")
        renderBondsTime = time.time()
        # SETTINGS
        bondThicknessVTK = bondsOptions.bondThicknessVTK
        bondNumSides = bondsOptions.bondNumSides
        # END SETTINGS
        # points: bond anchor positions
        bondPoints = vtk.vtkPoints()
        bondPoints.SetData(bondCoords.getVTK())
        # poly data: scalars drive colouring, vectors drive glyph placement
        bondPolyData = vtk.vtkPolyData()
        bondPolyData.SetPoints(bondPoints)
        bondPolyData.GetPointData().SetScalars(bondScalars.getVTK())
        bondPolyData.GetPointData().SetVectors(bondVectors.getVTK())
        # line source: repositioned per bond by _bondGlyphMethod
        lineSource = vtk.vtkLineSource()
        # tubes: sweep a capped tube around each line
        tubes = vtk.vtkTubeFilter()
        tubes.SetInputConnection(lineSource.GetOutputPort())
        tubes.SetRadius(bondThicknessVTK)
        tubes.SetNumberOfSides(bondNumSides)
        tubes.SetCapping(1)
        # glyph filter: calls _bondGlyphMethod for every input point
        bondGlyphFilter = vtk.vtkProgrammableGlyphFilter()
        bondGlyphFilter.SetGlyphMethod(functools.partial(_bondGlyphMethod, bondGlyphFilter, lineSource))
        # VTK 5 and VTK >= 6 use different input/source APIs
        if vtk.vtkVersion.GetVTKMajorVersion() <= 5:
            bondGlyphFilter.SetSource(tubes.GetOutput())
            bondGlyphFilter.SetInput(bondPolyData)
        else:
            bondGlyphFilter.SetSourceConnection(tubes.GetOutputPort())
            bondGlyphFilter.SetInputData(bondPolyData)
        # mapper
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputConnection(bondGlyphFilter.GetOutputPort())
        mapper.SetLookupTable(lut)
        utils.setMapperScalarRange(mapper, colouringOptions, numSpecies)
        # actor
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        actor.GetProperty().SetOpacity(1)
        actor.GetProperty().SetLineWidth(bondThicknessVTK)
        # time taken
        renderBondsTime = time.time() - renderBondsTime
        self._logger.debug("Render bonds time: %f s", renderBondsTime)
        # store attributes for writePovray and the base renderer
        self._actor = utils.ActorObject(actor)
        self._data["Points"] = bondCoords
        self._data["Scalars"] = bondScalars
        self._data["Vectors"] = bondVectors
        self._data["LUT"] = lut
        self._data["Bond thickness"] = bondsOptions.bondThicknessPOV

    def writePovray(self, filename):
        """Write bonds to POV-Ray file (uses the data stored by render)."""
        self._logger.debug("Writing bonds to POV-Ray file")
        # povray writer
        writer = povrayWriters.PovrayBondsWriter()
        writer.write(filename, self._data["Points"], self._data["Vectors"], self._data["Scalars"],
                     self._data["LUT"], self._data["Bond thickness"])
class BondCalculator(object):
    """
    Object that calculates bonds between visible atoms.
    """
    def __init__(self):
        self._logger = logging.getLogger(__name__ + ".BondCalculator")

    def calculateBonds(self, inputState, visibleAtoms, scalarsArray, bondMinArray, bondMaxArray, drawList,
                       maxBondsPerAtom=50):
        """Find bonds between the visible atoms.

        Calls the C bonds library, then packs the results into
        NumpyVTKData wrappers for rendering.

        Returns (bondCoords, bondVectors, bondScalars, bondSpecieCounter).
        Raises RuntimeError if the C library reports an error.
        """
        self._logger.info("Calculating bonds")
        # arrays for results
        # TODO: create array within C lib so not so big!
        nvis = len(visibleAtoms)
        nspecs = len(inputState.specieList)
        # each bond is shared by two atoms, hence the division by two
        size = nvis * maxBondsPerAtom // 2
        bondArray = np.empty(size, np.int32)
        NBondsArray = np.zeros(nvis, np.int32)
        bondVectorArray = np.empty(3 * size, np.float64)
        # per specie-pair bond counts, filled in by the C library
        bondSpecieCounter = np.zeros((nspecs, nspecs), dtype=np.int32)
        maxBond = bondMaxArray.max()
        # call C library
        status = bonds.calculateBonds(visibleAtoms, inputState.pos, inputState.specie, len(inputState.specieList),
                                      bondMinArray, bondMaxArray, maxBond, maxBondsPerAtom, inputState.cellDims,
                                      inputState.PBC, bondArray, NBondsArray, bondVectorArray, bondSpecieCounter)
        if status:
            if status == 1:
                msg = "Max bonds per atom exceeded! This would suggest your bond range is too big!"
            else:
                msg = "Error in bonds clib (%d)" % status
            raise RuntimeError(msg)
        # total number of bonds
        NBondsTotal = np.sum(NBondsArray)
        self._logger.info("Total number of bonds: %d (x2 for actors)", NBondsTotal)
        # resize bond arrays to discard the unused tail of the buffers
        bondArray.resize(NBondsTotal)
        bondVectorArray.resize(NBondsTotal * 3)
        # construct bonds arrays for rendering
        res = _rendering.makeBondsArrays(visibleAtoms, scalarsArray.getNumpy(), inputState.pos, NBondsArray, bondArray,
                                         bondVectorArray)
        bondCoords, bondVectors, bondScalars = res
        bondCoords = utils.NumpyVTKData(bondCoords)
        bondVectors = utils.NumpyVTKData(bondVectors, name="vectors")
        bondScalars = utils.NumpyVTKData(bondScalars, name="colours")
        # specie counters: symmetrise counts and log the pairs that are drawn
        specieList = inputState.specieList
        for i in range(nspecs):
            syma = specieList[i]
            for j in range(i, nspecs):
                symb = specieList[j]
                # check if selected (either ordering of the pair counts)
                pairStr = "%s-%s" % (syma, symb)
                pairStr2 = "%s-%s" % (symb, syma)
                if pairStr in drawList or pairStr2 in drawList:
                    NBondsPair = bondSpecieCounter[i][j]
                    if i != j:
                        NBondsPair += bondSpecieCounter[j][i]
                        bondSpecieCounter[i][j] = NBondsPair
                        bondSpecieCounter[j][i] = NBondsPair
                    self._logger.info("%d %s - %s bonds", NBondsPair, syma, symb)
        return bondCoords, bondVectors, bondScalars, bondSpecieCounter
class DisplacmentVectorCalculator(object):
    """
    Calculate displacement vectors.

    NOTE(review): the class name is misspelled ("Displacment") but is kept
    as-is because external callers reference it by this name.
    """
    def __init__(self):
        self._logger = logging.getLogger(__name__ + ".DisplacmentVectorCalculator")

    def calculateDisplacementVectors(self, pos, refPos, pbc, cellDims, atomList, scalarsArray):
        """Calculate displacement vectors for the set of atoms.

        pos/refPos are the current and reference positions; pbc/cellDims
        describe the periodic cell. Returns (bondCoords, bondVectors,
        bondScalars) as NumpyVTKData wrappers, ready for rendering.
        """
        self._logger.debug("Calculating displacement vectors")
        # calculate vectors (filled in by the C library)
        numAtoms = len(atomList)
        bondVectors = np.empty(3 * numAtoms, np.float64)
        # per-atom flag: whether the displacement is large enough to draw
        drawBondVector = np.empty(numAtoms, np.int32)
        numBonds = bonds.calculateDisplacementVectors(atomList, pos, refPos, cellDims, pbc, bondVectors, drawBondVector)
        self._logger.debug("Number of displacement vectors to draw = %d (/ %d)", numBonds, numAtoms)
        # calculate arrays for rendering
        res = _rendering.makeDisplacementVectorBondsArrays(numBonds, atomList, scalarsArray, pos, drawBondVector,
                                                           bondVectors)
        # note: bondVectors is rebound here to the rendering-ready array
        bondCoords, bondVectors, bondScalars = res
        bondCoords = utils.NumpyVTKData(bondCoords)
        bondVectors = utils.NumpyVTKData(bondVectors, name="vectors")
        bondScalars = utils.NumpyVTKData(bondScalars, name="colours")
        return bondCoords, bondVectors, bondScalars
|
chrisdjscott/Atoman
|
atoman/rendering/renderers/bondRenderer.py
|
Python
|
mit
| 8,568
|
[
"VTK"
] |
bcda6c2e05718b1fe07383aef6810e180168c2b8d7bada03cf1713a15ba9bf72
|
# pylint: disable=C0111
# pylint: disable=W0621
import urllib
from lettuce import world
from django.contrib.auth.models import User, Group
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore, clear_existing_modulestores
from xmodule.contentstore.django import _CONTENTSTORE
@world.absorb
def create_user(uname, password):
    """Create, register and activate a portal user with the given credentials.

    Also creates the associated user profile. No-op if a user with this
    username already exists.
    """
    # If the user already exists, don't try to create it again.
    # exists() issues a cheap EXISTS query instead of fetching every row.
    if User.objects.filter(username=uname).exists():
        return
    portal_user = world.UserFactory.build(username=uname, email=uname + '@edx.org')
    portal_user.set_password(password)
    portal_user.save()
    registration = world.RegistrationFactory(user=portal_user)
    registration.register(portal_user)
    registration.activate()
    world.UserProfileFactory(user=portal_user)
@world.absorb
def log_in(username='robot', password='test', email='robot@edx.org', name="Robot"):
    """
    Use the auto_auth feature to programmatically log the user in
    """
    query = urllib.urlencode({
        'username': username,
        'password': password,
        'email': email,
        'full_name': name,
    })
    world.visit('/auto_auth' + "?" + query)
    # Save the user info in the world scenario_dict for use in the tests
    world.scenario_dict['USER'] = User.objects.get(username=username)
@world.absorb
def register_by_course_key(course_key, username='robot', password='test', is_staff=False):
    """Ensure the user exists, optionally flag global staff, then enroll."""
    create_user(username, password)
    django_user = User.objects.get(username=username)
    # Note: this flag makes the user global staff - that is, an edX employee -
    # not a course staff. See courseware.tests.factories for StaffFactory and
    # InstructorFactory.
    if is_staff:
        django_user.is_staff = True
        django_user.save()
    CourseEnrollment.enroll(django_user, course_key)
@world.absorb
def enroll_user(user, course_key):
    """Activate the user's registration and enroll them in the course."""
    # Activate user
    reg = world.RegistrationFactory(user=user)
    reg.register(user)
    reg.activate()
    # Enroll them in the course
    CourseEnrollment.enroll(user, course_key)
@world.absorb
def clear_courses():
    """Drop the modulestore database and reset the content/module stores."""
    # Flush and initialize the module store
    # Note that if your test module gets in some weird state
    # (though it shouldn't), do this manually
    # from the bash shell to drop it:
    # $ mongo test_xmodule --eval "db.dropDatabase()"
    modulestore()._drop_database()  # pylint: disable=protected-access
    _CONTENTSTORE.clear()
    clear_existing_modulestores()
|
LICEF/edx-platform
|
common/djangoapps/terrain/course_helpers.py
|
Python
|
agpl-3.0
| 2,503
|
[
"VisIt"
] |
59b4c4afe5bda3d762bbddff07ee5ad785f5c1d6c3c56317ff6efea2aa41183f
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.