text: stringlengths 12 to 1.05M | repo_name: stringlengths 5 to 86 | path: stringlengths 4 to 191 | language: stringclasses 1 value | license: stringclasses 15 values | size: int32 12 to 1.05M | keyword: listlengths 1 to 23 | text_hash: stringlengths 64 to 64
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Hou Shaohui
#
# Author: Hou Shaohui <houshao55@gmail.com>
# Maintainer: Hou Shaohui <houshao55@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pycurl
import StringIO
class CurlException(Exception):
pass
class MyCurl(object):
'''Curl Class'''
def __init__(self,
cookie_file=None,
header=None,
proxy_host=None,
proxy_port=None):
self.cookie_file = cookie_file
self.header = header
self.proxy_host = proxy_host
self.proxy_port = proxy_port
def set_cookie_file(self, cookie_file):
self.cookie_file = cookie_file
def get(self,
url,
header=None,
proxy_host=None,
proxy_port=None,
cookie_file=None):
'''
open the url with the GET method
@param url: the url to visit
@param header: the http header
@param proxy_host: the proxy host name
@param proxy_port: the proxy port
'''
crl = pycurl.Curl()
#crl.setopt(pycurl.VERBOSE,1)
crl.setopt(pycurl.NOSIGNAL, 1)
# set proxy
# crl.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
rel_proxy_host = proxy_host or self.proxy_host
if rel_proxy_host:
crl.setopt(pycurl.PROXY, rel_proxy_host)
rel_proxy_port = proxy_port or self.proxy_port
if rel_proxy_port:
crl.setopt(pycurl.PROXYPORT, rel_proxy_port)
# set cookie
rel_cookie_file = cookie_file or self.cookie_file
if rel_cookie_file:
crl.setopt(pycurl.COOKIEFILE, rel_cookie_file)
crl.setopt(pycurl.COOKIEJAR, rel_cookie_file)
# set ssl
crl.setopt(pycurl.SSL_VERIFYPEER, 0)
crl.setopt(pycurl.SSL_VERIFYHOST, 0)
crl.setopt(pycurl.SSLVERSION, 3)
crl.setopt(pycurl.CONNECTTIMEOUT, 10)
crl.setopt(pycurl.TIMEOUT, 300)
crl.setopt(pycurl.HTTPPROXYTUNNEL, 1)
rel_header = header or self.header
if rel_header:
crl.setopt(pycurl.HTTPHEADER, rel_header)
crl.fp = StringIO.StringIO()
if isinstance(url, unicode):
url = str(url)
crl.setopt(pycurl.URL, url)
crl.setopt(crl.WRITEFUNCTION, crl.fp.write)
try:
crl.perform()
except Exception, e:
raise CurlException(e)
crl.close()
return crl.fp.getvalue()
def post(self,
url,
data,
header=None,
proxy_host=None,
proxy_port=None,
cookie_file=None):
'''
open the url with the POST method
@param url: the url to visit
@param data: the data to post
@param header: the http header
@param proxy_host: the proxy host name
@param proxy_port: the proxy port
'''
crl = pycurl.Curl()
#crl.setopt(pycurl.VERBOSE,1)
crl.setopt(pycurl.NOSIGNAL, 1)
# set proxy
rel_proxy_host = proxy_host or self.proxy_host
if rel_proxy_host:
crl.setopt(pycurl.PROXY, rel_proxy_host)
rel_proxy_port = proxy_port or self.proxy_port
if rel_proxy_port:
crl.setopt(pycurl.PROXYPORT, rel_proxy_port)
# set cookie
rel_cookie_file = cookie_file or self.cookie_file
if rel_cookie_file:
crl.setopt(pycurl.COOKIEFILE, rel_cookie_file)
crl.setopt(pycurl.COOKIEJAR, rel_cookie_file)
# set ssl
crl.setopt(pycurl.SSL_VERIFYPEER, 0)
crl.setopt(pycurl.SSL_VERIFYHOST, 0)
crl.setopt(pycurl.SSLVERSION, 3)
crl.setopt(pycurl.CONNECTTIMEOUT, 10)
crl.setopt(pycurl.TIMEOUT, 300)
crl.setopt(pycurl.HTTPPROXYTUNNEL, 1)
rel_header = header or self.header
if rel_header:
crl.setopt(pycurl.HTTPHEADER, rel_header)
crl.fp = StringIO.StringIO()
crl.setopt(crl.POSTFIELDS, data) # post data
if isinstance(url, unicode):
url = str(url)
crl.setopt(pycurl.URL, url)
crl.setopt(crl.WRITEFUNCTION, crl.fp.write)
try:
crl.perform()
except Exception, e:
raise CurlException(e)
crl.close()
return crl.fp.getvalue()
def upload(self,
url,
data,
header=None,
proxy_host=None,
proxy_port=None,
cookie_file=None):
'''
open the url and upload data (multipart POST)
@param url: the url to visit
@param data: the data to upload
@param header: the http header
@param proxy_host: the proxy host name
@param proxy_port: the proxy port
'''
crl = pycurl.Curl()
#crl.setopt(pycurl.VERBOSE,1)
crl.setopt(pycurl.NOSIGNAL, 1)
# set proxy
rel_proxy_host = proxy_host or self.proxy_host
if rel_proxy_host:
crl.setopt(pycurl.PROXY, rel_proxy_host)
rel_proxy_port = proxy_port or self.proxy_port
if rel_proxy_port:
crl.setopt(pycurl.PROXYPORT, rel_proxy_port)
# set cookie
rel_cookie_file = cookie_file or self.cookie_file
if rel_cookie_file:
crl.setopt(pycurl.COOKIEFILE, rel_cookie_file)
crl.setopt(pycurl.COOKIEJAR, rel_cookie_file)
# set ssl
crl.setopt(pycurl.SSL_VERIFYPEER, 0)
crl.setopt(pycurl.SSL_VERIFYHOST, 0)
crl.setopt(pycurl.SSLVERSION, 3)
crl.setopt(pycurl.CONNECTTIMEOUT, 10)
crl.setopt(pycurl.TIMEOUT, 300)
crl.setopt(pycurl.HTTPPROXYTUNNEL, 1)
rel_header = header or self.header
if rel_header:
crl.setopt(pycurl.HTTPHEADER, rel_header)
crl.fp = StringIO.StringIO()
if isinstance(url, unicode):
url = str(url)
crl.setopt(pycurl.URL, url)
crl.setopt(pycurl.HTTPPOST, data) # upload file
crl.setopt(crl.WRITEFUNCTION, crl.fp.write)
try:
crl.perform()
except Exception, e:
raise CurlException(e)
crl.close()
return crl.fp.getvalue()
public_curl = MyCurl(header=['User-agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.4 ' \
'(KHTML, like Gecko) Chrome/22.0.1229.94 Safari/537.4',])
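# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# how MyCurl might be driven, assuming pycurl is installed and the placeholder
# URL below is reachable. It fetches a page through the module-level
# public_curl instance and prints the first bytes of the response.
if __name__ == "__main__":
    try:
        body = public_curl.get("http://example.com/")
        print body[:200]
    except CurlException, e:
        print "request failed:", e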
repo_name: dragondjf/QMusic | path: src/dwidgets/coverlrc/mycurl.py | language: Python | license: lgpl-2.1 | size: 7,129 | keyword: ["VisIt"] | text_hash: 5d0fea59d15dabd1bc2d9f035fbebf7bb079e7c5fe422c462f2d7ea0db16e2f7
from __future__ import print_function, division
from sympy.core import Mul
from sympy.functions import DiracDelta, Heaviside
from sympy.core.compatibility import default_sort_key
from sympy.core.singleton import S
def change_mul(node, x):
"""change_mul(node, x)
Rearranges the operands of a product, bringing to front any simple
DiracDelta expression.
If no simple DiracDelta expression was found, then all the DiracDelta
expressions are simplified (using DiracDelta.expand(diracdelta=True, wrt=x)).
Return: (dirac, new node)
Where:
o dirac is either a simple DiracDelta expression or None (if no simple
expression was found);
o new node is either a simplified DiracDelta expression or None (if it
could not be simplified).
Examples
========
>>> from sympy import DiracDelta, cos
>>> from sympy.integrals.deltafunctions import change_mul
>>> from sympy.abc import x, y
>>> change_mul(x*y*DiracDelta(x)*cos(x), x)
(DiracDelta(x), x*y*cos(x))
>>> change_mul(x*y*DiracDelta(x**2 - 1)*cos(x), x)
(None, x*y*cos(x)*DiracDelta(x - 1)/2 + x*y*cos(x)*DiracDelta(x + 1)/2)
>>> change_mul(x*y*DiracDelta(cos(x))*cos(x), x)
(None, None)
See Also
========
sympy.functions.special.delta_functions.DiracDelta
deltaintegrate
"""
new_args = []
dirac = None
#Sorting is needed so that we consistently collapse the same delta;
#However, we must preserve the ordering of non-commutative terms
c, nc = node.args_cnc()
sorted_args = sorted(c, key=default_sort_key)
sorted_args.extend(nc)
for arg in sorted_args:
if arg.is_Pow and arg.base.func is DiracDelta:
new_args.append(arg.func(arg.base, arg.exp - 1))
arg = arg.base
if dirac is None and (arg.func is DiracDelta and arg.is_simple(x)):
dirac = arg
else:
new_args.append(arg)
if not dirac: # there was no simple dirac
new_args = []
for arg in sorted_args:
if arg.func is DiracDelta:
new_args.append(arg.expand(diracdelta=True, wrt=x))
elif arg.is_Pow and arg.base.func is DiracDelta:
new_args.append(arg.func(arg.base.expand(diracdelta=True, wrt=x), arg.exp))
else:
new_args.append(arg)
if new_args != sorted_args:
nnode = Mul(*new_args).expand()
else: # if the node didn't change there is nothing to do
nnode = None
return (None, nnode)
return (dirac, Mul(*new_args))
def deltaintegrate(f, x):
"""
deltaintegrate(f, x)
The idea for integration is the following:
- If we are dealing with a DiracDelta expression, i.e. DiracDelta(g(x)),
we try to simplify it.
If we could simplify it, then we integrate the resulting expression.
We already know we can integrate a simplified expression, because only
simple DiracDelta expressions are involved.
If we couldn't simplify it, there are two cases:
1) The expression is a simple expression: we return the integral,
taking care if we are dealing with a Derivative or with a proper
DiracDelta.
2) The expression is not simple (i.e. DiracDelta(cos(x))): we can do
nothing at all.
- If the node is a multiplication node having a DiracDelta term:
First we expand it.
If the expansion did work, then we try to integrate the expansion.
If not, we try to extract a simple DiracDelta term, then we have two
cases:
1) We have a simple DiracDelta term, so we return the integral.
2) We didn't have a simple term, but we do have an expression with
simplified DiracDelta terms, so we integrate this expression.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.integrals.deltafunctions import deltaintegrate
>>> from sympy import sin, cos, DiracDelta, Heaviside
>>> deltaintegrate(x*sin(x)*cos(x)*DiracDelta(x - 1), x)
sin(1)*cos(1)*Heaviside(x - 1)
>>> deltaintegrate(y**2*DiracDelta(x - z)*DiracDelta(y - z), y)
z**2*DiracDelta(x - z)*Heaviside(y - z)
See Also
========
sympy.functions.special.delta_functions.DiracDelta
sympy.integrals.integrals.Integral
"""
if not f.has(DiracDelta):
return None
from sympy.integrals import Integral, integrate
from sympy.solvers import solve
# g(x) = DiracDelta(h(x))
if f.func == DiracDelta:
h = f.expand(diracdelta=True, wrt=x)
if h == f: # can't simplify the expression
#FIXME: the second term tells whether it is a DiracDelta or a Derivative
#For integrating derivatives of DiracDelta we need the chain rule
if f.is_simple(x):
if (len(f.args) <= 1 or f.args[1] == 0):
return Heaviside(f.args[0])
else:
return (DiracDelta(f.args[0], f.args[1] - 1) /
f.args[0].as_poly().LC())
else: # let's try to integrate the simplified expression
fh = integrate(h, x)
return fh
elif f.is_Mul or f.is_Pow: # g(x) = a*b*c*f(DiracDelta(h(x)))*d*e
g = f.expand()
if f != g: # the expansion worked
fh = integrate(g, x)
if fh is not None and not isinstance(fh, Integral):
return fh
else:
# no expansion performed, try to extract a simple DiracDelta term
deltaterm, rest_mult = change_mul(f, x)
if not deltaterm:
if rest_mult:
fh = integrate(rest_mult, x)
return fh
else:
deltaterm = deltaterm.expand(diracdelta=True, wrt=x)
if deltaterm.is_Mul: # Take out any extracted factors
deltaterm, rest_mult_2 = change_mul(deltaterm, x)
rest_mult = rest_mult*rest_mult_2
point = solve(deltaterm.args[0], x)[0]
# Return the largest hyperreal term left after
# repeated integration by parts. For example,
#
# integrate(y*DiracDelta(x, 1),x) == y*DiracDelta(x,0), not 0
#
# This is so Integral(y*DiracDelta(x).diff(x),x).doit()
# will return y*DiracDelta(x) instead of 0 or DiracDelta(x),
# both of which are correct everywhere the value is defined
# but give wrong answers for nested integration.
n = (0 if len(deltaterm.args)==1 else deltaterm.args[1])
m = 0
while n >= 0:
r = (-1)**n*rest_mult.diff(x, n).subs(x, point)
if r is S.Zero:
n -= 1
m += 1
else:
if m == 0:
return r*Heaviside(x - point)
else:
return r*DiracDelta(x,m-1)
# In some very weak sense, x=0 is still a singularity,
# but we hope it will not be of any practical consequence.
return S.Zero
return None
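# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a quick check of the
# behaviour described in the comments above, assuming sympy is importable.
# Integrating y*DiracDelta(x, 1) keeps the largest hyperreal term,
# y*DiracDelta(x), instead of collapsing to zero, while a plain simple delta
# integrates to a Heaviside term.
if __name__ == "__main__":
    from sympy.abc import x, y
    print(deltaintegrate(y*DiracDelta(x, 1), x))   # y*DiracDelta(x)
    print(deltaintegrate(x*DiracDelta(x - 1), x))  # Heaviside(x - 1)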
repo_name: drufat/sympy | path: sympy/integrals/deltafunctions.py | language: Python | license: bsd-3-clause | size: 7,395 | keyword: ["DIRAC"] | text_hash: 7481a8e42aed80170c86234105a20faf28ce1cb1d80d5dee91da8d7f17b98db6
import tempfile
from pele.systems import AtomicCluster
from pele.potentials import LJ
from pele.utils.xyz import write_xyz
__all__ = ["LJCluster"]
class LJCluster(AtomicCluster):
"""
define the System class for a Lennard-Jones cluster
Parameters
----------
natoms : int
See Also
--------
BaseSystem, AtomicCluster
"""
def __init__(self, natoms):
super(LJCluster, self).__init__()
self.natoms = natoms
self.params.database.accuracy = 1e-3
self.params.basinhopping["temperature"] = 1.0
# self.params.double_ended_connect.NEBparams.reinterpolate = 1
def get_permlist(self):
return [range(self.natoms)]
def get_potential(self):
return LJ()
def get_system_properties(self):
return dict(natoms=int(self.natoms),
potential="LJ cluster",
)
#
# below here is stuff only for the gui
#
def draw(self, coordslinear, index, subtract_com=True): # pragma: no cover
"""
tell the gui how to represent your system using openGL objects
Parameters
----------
coordslinear : array
index : int
we can have more than one molecule on the screen at one time. index tells
which one to draw. They are viewed at the same time, so they should be
visually distinct, e.g. different colors. accepted values are 1 or 2
"""
from _opengl_tools import draw_atomic_single_atomtype
draw_atomic_single_atomtype(coordslinear, index, subtract_com=subtract_com)
def load_coords_pymol(self, coordslist, oname, index=1): # pragma: no cover
"""load the coords into pymol
the new object must be named oname so we can manipulate it later
Parameters
----------
coordslist : list of arrays
oname : str
the new pymol object must be named oname so it can be manipulated
later
index : int
we can have more than one molecule on the screen at one time. index tells
which one to draw. They are viewed at the same time, so should be
visually distinct, e.g. different colors. accepted values are 1 or 2
Notes
-----
the implementation here is a bit hacky. we create a temporary xyz file from coords
and load the molecule in pymol from this file.
"""
# pymol is imported here so you can do, e.g. basinhopping without installing pymol
import pymol
# create the temporary file
suffix = ".xyz"
f = tempfile.NamedTemporaryFile(mode="w", suffix=suffix)
fname = f.name
# write the coords into the xyz file
from pele.mindist import CoMToOrigin
for coords in coordslist:
coords = CoMToOrigin(coords.copy())
write_xyz(f, coords, title=oname, atomtypes=["LA"])
f.flush()
# load the molecule from the temporary file
pymol.cmd.load(fname)
# get the name of the object just created and change it to oname
objects = pymol.cmd.get_object_list()
objectname = objects[-1]
pymol.cmd.set_name(objectname, oname)
# set the representation
pymol.cmd.hide("everything", oname)
pymol.cmd.show("spheres", oname)
# set the color according to index
if index == 1:
pymol.cmd.color("red", oname)
else:
pymol.cmd.color("gray", oname)
#
# only for testing below here
#
def run(): # pragma: no cover
# create the system object
sys = LJCluster(15)
# create a database
db = sys.create_database()
# do a short basinhopping run
bh = sys.get_basinhopping(database=db, outstream=None)
while len(db.minima()) < 2:
bh.run(100)
# try to connect the lowest two minima
min1, min2 = db.minima()[:2]
connect = sys.get_double_ended_connect(min1, min2, db)
connect.connect()
if __name__ == "__main__":
run()
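# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): evaluate the
# Lennard-Jones energy and gradient of a random configuration through the
# system's potential object. This assumes numpy is available and that the
# potential exposes the usual pele getEnergyGradient() interface.
def energy_example():  # pragma: no cover
    import numpy as np
    system = LJCluster(13)
    coords = np.random.uniform(-1.0, 1.0, 3 * system.natoms)
    pot = system.get_potential()
    energy, gradient = pot.getEnergyGradient(coords)
    print "E =", energy, "|grad| =", np.linalg.norm(gradient)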
repo_name: smcantab/pele | path: pele/systems/ljcluster.py | language: Python | license: gpl-3.0 | size: 4,106 | keyword: ["PyMOL"] | text_hash: 2b65038e1b177efcadc7a7df9b30f335d56b08a7b26b7a0b71d405a7467a9aca
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: elasticache
short_description: Manage cache clusters in Amazon Elasticache.
description:
- Manage cache clusters in Amazon Elasticache.
- Returns information about the specified cache cluster.
version_added: "1.4"
author: Jim Dalton
options:
state:
description:
- C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster, resulting in a momentary outage.
choices: ['present', 'absent', 'rebooted']
required: true
name:
description:
- The cache cluster identifier
required: true
engine:
description:
- Name of the cache engine to be used (memcached or redis)
required: false
default: memcached
cache_engine_version:
description:
- The version number of the cache engine
required: false
default: 1.4.14
node_type:
description:
- The compute and memory capacity of the nodes in the cache cluster
required: false
default: cache.m1.small
num_nodes:
description:
- The initial number of cache nodes that the cache cluster will have
required: false
cache_port:
description:
- The port number on which each of the cache nodes will accept connections
required: false
default: 11211
cache_subnet_group:
description:
- The subnet group name to associate with. Only use inside a VPC; required when the cluster is inside a VPC
required: conditional
default: None
version_added: "1.7"
security_group_ids:
description:
- A list of vpc security group names to associate with this cache cluster. Only use if inside a vpc
required: false
default: ['default']
version_added: "1.6"
cache_security_groups:
description:
- A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a vpc
required: false
default: ['default']
zone:
description:
- The EC2 Availability Zone in which the cache cluster will be created
required: false
default: None
wait:
description:
- Wait for cache cluster result before returning
required: false
default: yes
choices: [ "yes", "no" ]
hard_modify:
description:
- Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state
required: false
default: no
choices: [ "yes", "no" ]
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: ['aws_region', 'ec2_region']
extends_documentation_fragment: aws
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic example
- elasticache:
name: "test-please-delete"
state: present
engine: memcached
cache_engine_version: 1.4.14
node_type: cache.m1.small
num_nodes: 1
cache_port: 11211
cache_security_groups:
- default
zone: us-east-1d
# Ensure cache cluster is gone
- elasticache:
name: "test-please-delete"
state: absent
# Reboot cache cluster
- elasticache:
name: "test-please-delete"
state: rebooted
"""
import sys
import time
try:
import boto
from boto.elasticache.layer1 import ElastiCacheConnection
from boto.regioninfo import RegionInfo
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class ElastiCacheManager(object):
"""Handles elasticache creation and destruction"""
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
def __init__(self, module, name, engine, cache_engine_version, node_type,
num_nodes, cache_port, cache_subnet_group,
cache_security_groups, security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs):
self.module = module
self.name = name
self.engine = engine
self.cache_engine_version = cache_engine_version
self.node_type = node_type
self.num_nodes = num_nodes
self.cache_port = cache_port
self.cache_subnet_group = cache_subnet_group
self.cache_security_groups = cache_security_groups
self.security_group_ids = security_group_ids
self.zone = zone
self.wait = wait
self.hard_modify = hard_modify
self.region = region
self.aws_connect_kwargs = aws_connect_kwargs
self.changed = False
self.data = None
self.status = 'gone'
self.conn = self._get_elasticache_connection()
self._refresh_data()
def ensure_present(self):
"""Ensure cache cluster exists or create it if not"""
if self.exists():
self.sync()
else:
self.create()
def ensure_absent(self):
"""Ensure cache cluster is gone or delete it if not"""
self.delete()
def ensure_rebooted(self):
"""Ensure cache cluster is gone or delete it if not"""
self.reboot()
def exists(self):
"""Check if cache cluster exists"""
return self.status in self.EXIST_STATUSES
def create(self):
"""Create an ElastiCache cluster"""
if self.status == 'available':
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
else:
msg = "'%s' is currently deleting. Cannot create."
self.module.fail_json(msg=msg % self.name)
try:
response = self.conn.create_cache_cluster(cache_cluster_id=self.name,
num_cache_nodes=self.num_nodes,
cache_node_type=self.node_type,
engine=self.engine,
engine_version=self.cache_engine_version,
cache_security_group_names=self.cache_security_groups,
security_group_ids=self.security_group_ids,
cache_subnet_group_name=self.cache_subnet_group,
preferred_availability_zone=self.zone,
port=self.cache_port)
except boto.exception.BotoServerError, e:
self.module.fail_json(msg=e.message)
cache_cluster_data = response['CreateCacheClusterResponse']['CreateCacheClusterResult']['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('available')
return True
def delete(self):
"""Destroy an ElastiCache cluster"""
if self.status == 'gone':
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot delete."
self.module.fail_json(msg=msg % (self.name, self.status))
try:
response = self.conn.delete_cache_cluster(cache_cluster_id=self.name)
except boto.exception.BotoServerError, e:
self.module.fail_json(msg=e.message)
cache_cluster_data = response['DeleteCacheClusterResponse']['DeleteCacheClusterResult']['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('gone')
def sync(self):
"""Sync settings to cluster if required"""
if not self.exists():
msg = "'%s' is %s. Cannot sync."
self.module.fail_json(msg=msg % (self.name, self.status))
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
# Cluster can only be synced if available. If we can't wait
# for this, then just be done.
return
if self._requires_destroy_and_create():
if not self.hard_modify:
msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
self.module.fail_json(msg=msg % self.name)
if not self.wait:
msg = "'%s' requires destructive modification. 'wait' must be set to true."
self.module.fail_json(msg=msg % self.name)
self.delete()
self.create()
return
if self._requires_modification():
self.modify()
def modify(self):
"""Modify the cache cluster. Note it's only possible to modify a few select options."""
nodes_to_remove = self._get_nodes_to_remove()
try:
response = self.conn.modify_cache_cluster(cache_cluster_id=self.name,
num_cache_nodes=self.num_nodes,
cache_node_ids_to_remove=nodes_to_remove,
cache_security_group_names=self.cache_security_groups,
security_group_ids=self.security_group_ids,
apply_immediately=True,
engine_version=self.cache_engine_version)
except boto.exception.BotoServerError, e:
self.module.fail_json(msg=e.message)
cache_cluster_data = response['ModifyCacheClusterResponse']['ModifyCacheClusterResult']['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('available')
def reboot(self):
"""Reboot the cache cluster"""
if not self.exists():
msg = "'%s' is %s. Cannot reboot."
self.module.fail_json(msg=msg % (self.name, self.status))
if self.status == 'rebooting':
return
if self.status in ['creating', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot reboot."
self.module.fail_json(msg=msg % (self.name, self.status))
# Collect ALL nodes for reboot
cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
try:
response = self.conn.reboot_cache_cluster(cache_cluster_id=self.name,
cache_node_ids_to_reboot=cache_node_ids)
except boto.exception.BotoServerError, e:
self.module.fail_json(msg=e.message)
cache_cluster_data = response['RebootCacheClusterResponse']['RebootCacheClusterResult']['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('available')
def get_info(self):
"""Return basic info about the cache cluster"""
info = {
'name': self.name,
'status': self.status
}
if self.data:
info['data'] = self.data
return info
def _wait_for_status(self, awaited_status):
"""Wait for status to change from present status to awaited_status"""
status_map = {
'creating': 'available',
'rebooting': 'available',
'modifying': 'available',
'deleting': 'gone'
}
if self.status == awaited_status:
# No need to wait, we're already done
return
if status_map[self.status] != awaited_status:
msg = "Invalid awaited status. '%s' cannot transition to '%s'"
self.module.fail_json(msg=msg % (self.status, awaited_status))
if awaited_status not in set(status_map.values()):
msg = "'%s' is not a valid awaited status."
self.module.fail_json(msg=msg % awaited_status)
while True:
time.sleep(1)
self._refresh_data()
if self.status == awaited_status:
break
def _requires_modification(self):
"""Check if cluster requires (nondestructive) modification"""
# Check modifiable data attributes
modifiable_data = {
'NumCacheNodes': self.num_nodes,
'EngineVersion': self.cache_engine_version
}
for key, value in modifiable_data.iteritems():
if self.data[key] != value:
return True
# Check cache security groups
cache_security_groups = []
for sg in self.data['CacheSecurityGroups']:
cache_security_groups.append(sg['CacheSecurityGroupName'])
if set(cache_security_groups) - set(self.cache_security_groups):
return True
# check vpc security groups
vpc_security_groups = []
security_groups = self.data['SecurityGroups'] or []
for sg in security_groups:
vpc_security_groups.append(sg['SecurityGroupId'])
if set(vpc_security_groups) - set(self.security_group_ids):
return True
return False
def _requires_destroy_and_create(self):
"""
Check whether a destroy and create is required to synchronize cluster.
"""
unmodifiable_data = {
'node_type': self.data['CacheNodeType'],
'engine': self.data['Engine'],
'cache_port': self._get_port()
}
# Only check for modifications if zone is specified
if self.zone is not None:
unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
for key, value in unmodifiable_data.iteritems():
if getattr(self, key) != value:
return True
return False
def _get_elasticache_connection(self):
"""Get an elasticache connection"""
try:
endpoint = "elasticache.%s.amazonaws.com" % self.region
connect_region = RegionInfo(name=self.region, endpoint=endpoint)
return ElastiCacheConnection(
region=connect_region,
**self.aws_connect_kwargs
)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=e.message)
def _get_port(self):
"""Get the port. Where this information is retrieved from is engine dependent."""
if self.data['Engine'] == 'memcached':
return self.data['ConfigurationEndpoint']['Port']
elif self.data['Engine'] == 'redis':
# Redis only supports a single node (presently) so just use
# the first and only
return self.data['CacheNodes'][0]['Endpoint']['Port']
def _refresh_data(self, cache_cluster_data=None):
"""Refresh data about this cache cluster"""
if cache_cluster_data is None:
try:
response = self.conn.describe_cache_clusters(cache_cluster_id=self.name,
show_cache_node_info=True)
except boto.exception.BotoServerError:
self.data = None
self.status = 'gone'
return
cache_cluster_data = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'][0]
self.data = cache_cluster_data
self.status = self.data['CacheClusterStatus']
# The documentation for elasticache lies -- status on rebooting is set
# to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it
# here to make status checks etc. more sane.
if self.status == 'rebooting cache cluster nodes':
self.status = 'rebooting'
def _get_nodes_to_remove(self):
"""If there are nodes to remove, it figures out which need to be removed"""
num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
if num_nodes_to_remove <= 0:
return None
if not self.hard_modify:
msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
self.module.fail_json(msg=msg % self.name)
cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
return cache_node_ids[-num_nodes_to_remove:]
def main():
argument_spec = ec2_argument_spec()
default = object()
argument_spec.update(dict(
state={'required': True, 'choices': ['present', 'absent', 'rebooted']},
name={'required': True},
engine={'required': False, 'default': 'memcached'},
cache_engine_version={'required': False, 'default': '1.4.14'},
node_type={'required': False, 'default': 'cache.m1.small'},
num_nodes={'required': False, 'default': None, 'type': 'int'},
cache_port={'required': False, 'default': 11211, 'type': 'int'},
cache_subnet_group={'required': False, 'default': None},
cache_security_groups={'required': False, 'default': [default],
'type': 'list'},
security_group_ids={'required': False, 'default': [],
'type': 'list'},
zone={'required': False, 'default': None},
wait={'required': False, 'type' : 'bool', 'default': True},
hard_modify={'required': False, 'type': 'bool', 'default': False}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
name = module.params['name']
state = module.params['state']
engine = module.params['engine']
cache_engine_version = module.params['cache_engine_version']
node_type = module.params['node_type']
num_nodes = module.params['num_nodes']
cache_port = module.params['cache_port']
cache_subnet_group = module.params['cache_subnet_group']
cache_security_groups = module.params['cache_security_groups']
security_group_ids = module.params['security_group_ids']
zone = module.params['zone']
wait = module.params['wait']
hard_modify = module.params['hard_modify']
if cache_subnet_group and cache_security_groups == [default]:
cache_security_groups = []
if cache_subnet_group and cache_security_groups:
module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups")
if cache_security_groups == [default]:
cache_security_groups = ['default']
if state == 'present' and not num_nodes:
module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")
if not region:
module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
elasticache_manager = ElastiCacheManager(module, name, engine,
cache_engine_version, node_type,
num_nodes, cache_port,
cache_subnet_group,
cache_security_groups,
security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs)
if state == 'present':
elasticache_manager.ensure_present()
elif state == 'absent':
elasticache_manager.ensure_absent()
elif state == 'rebooted':
elasticache_manager.ensure_rebooted()
facts_result = dict(changed=elasticache_manager.changed,
elasticache=elasticache_manager.get_info())
module.exit_json(**facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
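# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): an additional
# playbook task combining the documented cache_subnet_group and
# security_group_ids options for a redis cluster inside a VPC. The subnet
# group name and security group id below are placeholders.
#
# - elasticache:
#     name: "test-redis-vpc"
#     state: present
#     engine: redis
#     node_type: cache.m1.small
#     num_nodes: 1
#     cache_port: 6379
#     cache_subnet_group: "my-cache-subnet-group"
#     security_group_ids:
#       - sg-12345678
#     zone: us-east-1d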
repo_name: tonyyarusso/ansible-modules-core | path: cloud/amazon/elasticache.py | language: Python | license: gpl-3.0 | size: 21,321 | keyword: ["Dalton"] | text_hash: 31f375c0241c6d389e44a1ed5f3cb20b9363c1a3c8645600111bd201c67edb6c
#!/usr/bin/env python
'''unit testing code for pysam.
Execute in the :file:`tests` directory as it requires the Makefile
and data files located there.
'''
import pysam
import unittest
import os
import re
import sys
import subprocess
import shutil
from TestUtils import checkBinaryEqual
IS_PYTHON3 = sys.version_info[0] >= 3
SAMTOOLS = "samtools"
WORKDIR = "pysam_test_work"
DATADIR = "pysam_data"
def runSamtools(cmd):
'''run a samtools command'''
try:
retcode = subprocess.call(cmd, shell=True,
stderr=subprocess.PIPE)
if retcode < 0:
print("Child was terminated by signal", -retcode)
except OSError as e:
print("Execution failed:", e)
def getSamtoolsVersion():
'''return samtools version'''
with subprocess.Popen(SAMTOOLS, shell=True,
stderr=subprocess.PIPE).stderr as pipe:
lines = b"".join(pipe.readlines())
if IS_PYTHON3:
lines = lines.decode('ascii')
return re.search("Version:\s+(\S+)", lines).groups()[0]
class BinaryTest(unittest.TestCase):
'''test samtools command line commands and compare
against pysam commands.
Tests fail if the output is not binary identical.
'''
first_time = True
# a dictionary of commands to test
# first entry: (samtools output file, samtools command)
# second entry: (pysam output file, (pysam function, pysam options) )
commands = \
{
"view":
(
("ex1.view", "view ex1.bam > ex1.view"),
("pysam_ex1.view", (pysam.view, "ex1.bam")),
),
"view2":
(
("ex1.view", "view -bT ex1.fa -o ex1.view2 ex1.sam"),
# note that -o ex1.view2 throws exception.
("pysam_ex1.view",
(pysam.view, "-bT ex1.fa -oex1.view2 ex1.sam")),
),
"sort":
(
("ex1.sort.bam", "sort ex1.bam ex1.sort"),
("pysam_ex1.sort.bam", (pysam.sort, "ex1.bam pysam_ex1.sort")),
),
"mpileup":
(
("ex1.pileup", "mpileup ex1.bam > ex1.pileup"),
("pysam_ex1.mpileup", (pysam.mpileup, "ex1.bam")),
),
"depth":
(
("ex1.depth", "depth ex1.bam > ex1.depth"),
("pysam_ex1.depth", (pysam.depth, "ex1.bam")),
),
"faidx":
(
("ex1.fa.fai", "faidx ex1.fa"),
("pysam_ex1.fa.fai", (pysam.faidx, "ex1.fa")),
),
"index":
(
("ex1.bam.bai", "index ex1.bam"),
("pysam_ex1.bam.bai", (pysam.index, "pysam_ex1.bam")),
),
"idxstats":
(
("ex1.idxstats", "idxstats ex1.bam > ex1.idxstats"),
("pysam_ex1.idxstats", (pysam.idxstats, "pysam_ex1.bam")),
),
"fixmate":
(
("ex1.fixmate", "fixmate ex1.bam ex1.fixmate"),
("pysam_ex1.fixmate",
(pysam.fixmate, "pysam_ex1.bam pysam_ex1.fixmate")),
),
"flagstat":
(
("ex1.flagstat", "flagstat ex1.bam > ex1.flagstat"),
("pysam_ex1.flagstat", (pysam.flagstat, "pysam_ex1.bam")),
),
"calmd":
(
("ex1.calmd", "calmd ex1.bam ex1.fa > ex1.calmd"),
("pysam_ex1.calmd", (pysam.calmd, "pysam_ex1.bam ex1.fa")),
),
"merge":
(
("ex1.merge", "merge -f ex1.merge ex1.bam ex1.bam"),
# -f option does not work - following command will cause the subsequent
# command to fail
("pysam_ex1.merge",
(pysam.merge, "pysam_ex1.merge pysam_ex1.bam pysam_ex1.bam")),
),
"rmdup":
(
("ex1.rmdup", "rmdup ex1.bam ex1.rmdup"),
("pysam_ex1.rmdup",
(pysam.rmdup, "pysam_ex1.bam pysam_ex1.rmdup")),
),
"reheader":
(
("ex1.reheader", "reheader ex1.bam ex1.bam > ex1.reheader"),
("pysam_ex1.reheader", (pysam.reheader, "ex1.bam ex1.bam")),
),
"cat":
(
("ex1.cat", "cat ex1.bam ex1.bam > ex1.cat"),
("pysam_ex1.cat", (pysam.cat, "ex1.bam ex1.bam")),
),
"targetcut":
(
("ex1.targetcut", "targetcut ex1.bam > ex1.targetcut"),
("pysam_ex1.targetcut", (pysam.targetcut, "pysam_ex1.bam")),
),
"phase":
(
("ex1.phase", "phase ex1.bam > ex1.phase"),
("pysam_ex1.phase", (pysam.phase, "pysam_ex1.bam")),
),
"import":
(
("ex1.bam", "import ex1.fa.fai ex1.sam.gz ex1.bam"),
("pysam_ex1.bam",
(pysam.samimport, "ex1.fa.fai ex1.sam.gz pysam_ex1.bam")),
),
"bam2fq":
(
("ex1.bam2fq", "bam2fq ex1.bam > ex1.bam2fq"),
("pysam_ex1.bam2fq", (pysam.bam2fq, "pysam_ex1.bam")),
),
"pad2unpad":
(
("ex2.unpad", "pad2unpad -T ex1.fa ex2.bam > ex2.unpad"),
("pysam_ex2.unpad", (pysam.pad2unpad, "-T ex1.fa ex2.bam")),
),
"bamshuf":
(
("ex1.bamshuf.bam", "bamshuf ex1.bam ex1.bamshuf"),
("pysam_ex1.bamshuf.bam",
(pysam.bamshuf, "ex1.bam pysam_ex1.bamshuf")),
),
"bedcov":
(
("ex1.bedcov", "bedcov ex1.bed ex1.bam > ex1.bedcov"),
("pysam_ex1.bedcov", (pysam.bedcov, "ex1.bed ex1.bam")),
),
}
# some tests depend on others. The order specifies in which order
# the samtools commands are executed.
# The first three (faidx, import, index) need to be in that order,
# the rest is arbitrary.
order = ('faidx', 'import', 'index',
# 'pileup1', 'pileup2', deprecated
# 'glfview', deprecated
'view', 'view2',
'sort',
'mpileup',
'depth',
'idxstats',
# 'fixmate',
'flagstat',
# 'calmd',
'merge',
# 'rmdup',
'reheader',
'cat',
'bedcov',
'targetcut',
'phase',
# 'bamshuf',
'bam2fq',
# 'pad2unpad',
)
def setUp(self):
'''setup tests.
For setup, all commands will be run before the first test is
executed. Individual tests will then just compare the output
files.
'''
if BinaryTest.first_time:
# remove previous files
if os.path.exists(WORKDIR):
shutil.rmtree(WORKDIR)
pass
# copy the source files to WORKDIR
os.makedirs(WORKDIR)
for f in ("ex1.fa", "ex1.sam.gz",
"ex1.sam", "ex2.bam",
"ex1.bed"):
shutil.copy(os.path.join(DATADIR, f),
os.path.join(WORKDIR, f))
# cd to workdir
savedir = os.getcwd()
os.chdir(WORKDIR)
for label in self.order:
# print ("command=", label)
command = self.commands[label]
# build samtools command and target and run
samtools_target, samtools_command = command[0]
runSamtools(" ".join((SAMTOOLS, samtools_command)))
# get pysam command and run
try:
pysam_target, pysam_command = command[1]
except ValueError as msg:
raise ValueError("error while setting up %s=%s: %s" %
(label, command, msg))
pysam_method, pysam_options = pysam_command
try:
output = pysam_method(*pysam_options.split(" "), raw=True)
except pysam.SamtoolsError as msg:
raise pysam.SamtoolsError(
"error while executing %s: options=%s: msg=%s" %
(label, pysam_options, msg))
if ">" in samtools_command:
with open(pysam_target, "wb") as outfile:
if type(output) == list:
if IS_PYTHON3:
for line in output:
outfile.write(line.encode('ascii'))
else:
for line in output:
outfile.write(line)
else:
outfile.write(output)
os.chdir(savedir)
BinaryTest.first_time = False
samtools_version = getSamtoolsVersion()
def _r(s):
# patch - remove any of the alpha/beta suffixes, i.e., 0.1.12a ->
# 0.1.12
if s.count('-') > 0:
s = s[0:s.find('-')]
return re.sub("[^0-9.]", "", s)
if _r(samtools_version) != _r(pysam.__samtools_version__):
raise ValueError(
"versions of pysam/samtools and samtools differ: %s != %s" %
(pysam.__samtools_version__,
samtools_version))
def checkCommand(self, command):
if command:
samtools_target, pysam_target = self.commands[
command][0][0], self.commands[command][1][0]
samtools_target = os.path.join(WORKDIR, samtools_target)
pysam_target = os.path.join(WORKDIR, pysam_target)
self.assertTrue(
checkBinaryEqual(samtools_target, pysam_target),
"%s failed: files %s and %s are not the same" %
(command, samtools_target, pysam_target))
def testImport(self):
self.checkCommand("import")
def testIndex(self):
self.checkCommand("index")
def testSort(self):
self.checkCommand("sort")
def testMpileup(self):
self.checkCommand("mpileup")
def testDepth(self):
self.checkCommand("depth")
def testIdxstats(self):
self.checkCommand("idxstats")
# def testFixmate(self):
# self.checkCommand("fixmate")
def testFlagstat(self):
self.checkCommand("flagstat")
def testMerge(self):
self.checkCommand("merge")
# def testRmdup(self):
# self.checkCommand("rmdup")
def testReheader(self):
self.checkCommand("reheader")
def testCat(self):
self.checkCommand("cat")
def testTargetcut(self):
self.checkCommand("targetcut")
def testPhase(self):
self.checkCommand("phase")
def testBam2fq(self):
self.checkCommand("bam2fq")
def testBedcov(self):
self.checkCommand("bedcov")
# def testBamshuf(self):
# self.checkCommand("bamshuf")
# def testPad2Unpad(self):
# self.checkCommand("pad2unpad")
# def testPileup1( self ):
# self.checkCommand( "pileup1" )
# def testPileup2( self ):
# self.checkCommand( "pileup2" )
# deprecated
# def testGLFView( self ):
# self.checkCommand( "glfview" )
def testView(self):
self.checkCommand("view")
def testEmptyIndex(self):
self.assertRaises(IOError, pysam.index, "exdoesntexist.bam")
def __del__(self):
if os.path.exists(WORKDIR):
shutil.rmtree(WORKDIR)
class StdoutTest(unittest.TestCase):
'''test if stdout can be redirected.'''
def testWithRedirectedStdout(self):
r = pysam.flagstat(os.path.join(DATADIR, "ex1.bam"))
self.assertTrue(len(r) > 0)
def testWithoutRedirectedStdout(self):
r = pysam.flagstat(os.path.join(DATADIR, "ex1.bam"),
catch_stdout=False)
self.assertTrue(len(r) == 0)
if __name__ == "__main__":
# build data files
print ("building data files")
subprocess.call("make -C %s" % DATADIR, shell=True)
print ("starting tests")
unittest.main()
print ("completed tests")
repo_name: brendanofallon/pysam | path: tests/samtools_test.py | language: Python | license: mit | size: 12,544 | keyword: ["pysam"] | text_hash: 5fbc8d7b2cad448aa34f6fbfaa8990261c022d5760d1370c4310d9bfe6efd7c1
import numpy as nm
from sfepy.base.base import output, OneTypeList, Struct
from sfepy.discrete.fem.mesh import Mesh
from sfepy.discrete.fem.meshio import MeshIO
from sfepy.solvers.ts import TimeStepper
from sfepy.base.ioutils import get_trunk, write_dict_hdf5
def _linearize(out, fields, linearization):
new = {}
for key, val in out.iteritems():
field = fields[val.field_name]
new.update(field.create_output(val.data, var_name=key,
dof_names=val.dofs, key=key,
linearization=linearization))
return new
def dump_to_vtk(filename, output_filename_trunk=None, step0=0, steps=None,
fields=None, linearization=None):
"""Dump a multi-time-step results file into a sequence of VTK files."""
def _save_step(suffix, out, mesh):
if linearization is not None:
output('linearizing...')
out = _linearize(out, fields, linearization)
output('...done')
for key, val in out.iteritems():
lmesh = val.get('mesh', mesh)
lmesh.write(output_filename_trunk + '_' + key + suffix,
io='auto', out={key : val})
if hasattr(val, 'levels'):
output('max. refinement per group:', val.levels)
else:
mesh.write(output_filename_trunk + suffix, io='auto', out=out)
output('dumping to VTK...')
io = MeshIO.any_from_filename(filename)
mesh = Mesh.from_file(filename, io=io)
if output_filename_trunk is None:
output_filename_trunk = get_trunk(filename)
try:
ts = TimeStepper(*io.read_time_stepper())
all_steps, times, nts, dts = extract_times(filename)
except ValueError:
output('no time stepping info found, assuming single step')
out = io.read_data(0)
if out is not None:
_save_step('.vtk', out, mesh)
ret = None
else:
ts.times = times
ts.n_step = times.shape[0]
if steps is None:
ii0 = nm.searchsorted(all_steps, step0)
iterator = ((all_steps[ii], times[ii])
for ii in xrange(ii0, len(times)))
else:
iterator = [(step, ts.times[step]) for step in steps]
max_step = all_steps.max()
for step, time in iterator:
output(ts.format % (step, max_step))
out = io.read_data(step)
if out is None: break
_save_step('.' + ts.suffix % step + '.vtk', out, mesh)
ret = ts.suffix
output('...done')
return ret
def extract_times(filename):
"""
Read true time step data from individual time steps.
Returns
-------
steps : array
The time steps.
times : array
The times of the time steps.
nts : array
The normalized times of the time steps, in [0, 1].
dts : array
The true time deltas.
"""
io = MeshIO.any_from_filename(filename)
steps, times, nts = io.read_times()
dts = nm.ediff1d(times, to_end=0)
return steps, times, nts, dts
def extract_time_history(filename, extract, verbose=True):
"""Extract time history of a variable from a multi-time-step results file.
Parameters
----------
filename : str
The name of file to extract from.
extract : str
The description of what to extract in a string of comma-separated
description items. A description item consists of: name of the variable
to extract, mode ('e' for elements, 'n' for nodes), ids of the nodes or
elements (given by the mode). Example: 'u n 10 15, p e 0' means
variable 'u' in nodes 10, 15 and variable 'p' in element 0.
verbose : bool
Verbosity control.
Returns
-------
ths : dict
The time histories in a dict with variable names as keys. If a nodal
variable is requested in elements, its value is a dict of histories in
the element nodes.
ts : TimeStepper instance
The time stepping information.
"""
output('extracting selected data...', verbose=verbose)
output('selection:', extract, verbose=verbose)
##
# Parse extractions.
pes = OneTypeList(Struct)
for chunk in extract.split(','):
aux = chunk.strip().split()
pes.append(Struct(var=aux[0],
mode=aux[1],
indx=map(int, aux[2:])))
##
# Verify array limits.
mesh = Mesh.from_file(filename)
for pe in pes:
if pe.mode == 'n':
for ii in pe.indx:
if (ii < 0) or (ii >= mesh.n_nod):
raise ValueError('node index 0 <= %d < %d!'
% (ii, mesh.n_nod))
if pe.mode == 'e':
for ii, ie in enumerate(pe.indx[:]):
if (ie < 0) or (ie >= mesh.n_el):
raise ValueError('element index 0 <= %d < %d!'
% (ie, mesh.n_el))
pe.indx[ii] = ie
##
# Extract data.
io = MeshIO.any_from_filename(filename)
ths = {}
for pe in pes:
mode, nname = io.read_data_header(pe.var)
output(mode, nname, verbose=verbose)
if ((pe.mode == 'n' and mode == 'vertex') or
(pe.mode == 'e' and mode == 'cell')):
th = io.read_time_history(nname, pe.indx)
elif pe.mode == 'e' and mode == 'vertex':
conn = mesh.conns[0]
th = {}
for iel in pe.indx:
ips = conn[iel]
th[iel] = io.read_time_history(nname, ips)
else:
raise ValueError('cannot extract cell data %s in nodes!' % pe.var)
ths[pe.var] = th
output('...done', verbose=verbose)
ts = TimeStepper(*io.read_time_stepper())
return ths, ts
def average_vertex_var_in_cells(ths_in):
"""Average histories in the element nodes for each nodal variable
originally requested in elements."""
ths = dict.fromkeys(ths_in.keys())
for var, th in ths_in.iteritems():
aux = dict.fromkeys(th.keys())
for ir, data in th.iteritems():
if isinstance(data, dict):
for ic, ndata in data.iteritems():
if aux[ir] is None:
aux[ir] = ndata
else:
aux[ir] += ndata
aux[ir] /= float(len(data))
else:
aux[ir] = data
ths[var] = aux
return ths
def save_time_history(ths, ts, filename_out):
"""Save time history and time-stepping information in a HDF5 file."""
ths.update({'times' : ts.times, 'dt' : ts.dt})
write_dict_hdf5(filename_out, ths)
def guess_time_units(times):
"""
Given a vector of times in seconds, return suitable time units and
new vector of times suitable for plotting.
Parameters
----------
times : array
The vector of times in seconds.
Returns
-------
new_times : array
The vector of times in `units`.
units : str
The time units.
"""
times = nm.asarray(times)
if (times[-1] / 60.0 / 60.0) > 10.0:
units = 'hours'
new_times = times / 60.0 / 60.0
elif (times[-1] / 60.0) > 10.0:
units = 'min.'
new_times = times / 60.0
else:
units = 's'
new_times = times
return new_times, units
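# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a small driver,
# assuming 'results.h5' is a multi-time-step results file written by sfepy.
# It extracts the history of variable 'u' in nodes 10 and 15 (the
# extract-string format documented in extract_time_history), stores it in an
# HDF5 file and reports suitable time units for plotting.
def example_history(filename='results.h5'):
    ths, ts = extract_time_history(filename, 'u n 10 15')
    save_time_history(ths, ts, 'u_history.h5')
    new_times, units = guess_time_units(ts.times)
    output('times are in', units)
    return ths, new_times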
repo_name: RexFuzzle/sfepy | path: sfepy/postprocess/time_history.py | language: Python | license: bsd-3-clause | size: 7,496 | keyword: ["VTK"] | text_hash: 640904df029f15c3896cbadd5088f36963569c8831b9930339f1c85990c378eb
import types
from DIRAC.Core.Utilities import Time
class DBUtils:
def __init__( self, db, setup ):
self._acDB = db
self._setup = setup
def _retrieveBucketedData( self,
typeName,
startTime,
endTime,
selectFields,
condDict = None,
groupFields = None,
orderFields = None ):
"""
Get data from the DB
Parameters:
- typeName -> typeName
- startTime & endTime -> datetime objects. Do I need to explain the meaning?
- selectFields -> tuple containing a string and a list of fields:
( "SUM(%s), %s/%s", ( "field1name", "field2name", "field3name" ) )
- condDict -> conditions for the query
key -> name of the key field
value -> list of possible values
- groupFields -> list of fields to group by, can be in form
( "%s, %s", ( "field1name", "field2name", "field3name" ) )
- orderFields -> list of fields to order by, can be in form
( "%s, %s", ( "field1name", "field2name", "field3name" )
"""
validCondDict = {}
if type( condDict ) == types.DictType:
for key in condDict:
if type( condDict[ key ] ) in ( types.ListType, types.TupleType ) and len( condDict[ key ] ) > 0:
validCondDict[ key ] = condDict[ key ]
return self._acDB.retrieveBucketedData( self._setup, typeName, startTime, endTime, selectFields, condDict, groupFields, orderFields )
def _getUniqueValues( self, typeName, startTime, endTime, condDict, fieldList ):
stringList = [ "%s" for field in fieldList ]
return self._retrieveBucketedData( typeName,
startTime,
endTime,
( ",".join( stringList ), fieldList ),
condDict,
fieldList )
def _groupByField( self, fieldIndex, dataList ):
"""
Group a list of lists/tuples into a dict of lists, keyed by the field at fieldIndex
"""
groupDict = {}
for row in dataList:
groupingField = row[ fieldIndex ]
if not groupingField in groupDict:
groupDict[ groupingField ] = []
if type( row ) == types.TupleType:
rowL = list( row[ :fieldIndex ] )
rowL.extend( row[ fieldIndex + 1: ] )
row = rowL
else:
del( row[ fieldIndex ] )
groupDict[ groupingField ].append( row )
return groupDict
def _getBins( self, typeName, startTime, endTime ):
return self._acDB.calculateBuckets( self._setup, typeName, startTime, endTime )
def _getBucketLengthForTime( self, typeName, momentEpoch ):
nowEpoch = Time.toEpoch()
return self._acDB.calculateBucketLengthForTime( self._setup, typeName, nowEpoch, momentEpoch )
def _spanToGranularity( self, granularity, bucketsData ):
"""
bucketsData must be a list of lists where each list contains
- field 0: datetime
- field 1: bucketLength
- fields 2-n: numericalFields
"""
normData = {}
def addToNormData( bucketDate, data, proportion = 1.0 ):
if bucketDate in normData:
for iP in range( len( data ) ):
val = data[ iP ]
if val == None:
val = 0
normData[ bucketDate ][iP] += float( val ) * proportion
normData[ bucketDate ][ -1 ] += proportion
else:
normData[ bucketDate ] = []
for fD in data:
if fD == None:
fD = 0
normData[ bucketDate ].append( float( fD ) * proportion )
normData[ bucketDate ].append( proportion )
for bucketData in bucketsData:
bucketDate = bucketData[0]
originalBucketLength = bucketData[1]
bucketValues = bucketData[2:]
if originalBucketLength == granularity:
addToNormData( bucketDate, bucketValues )
else:
startEpoch = bucketDate
endEpoch = bucketDate + originalBucketLength
newBucketEpoch = startEpoch - startEpoch % granularity
if startEpoch == endEpoch:
addToNormData( newBucketEpoch, bucketValues )
else:
while newBucketEpoch < endEpoch:
start = max( newBucketEpoch, startEpoch )
end = min( newBucketEpoch + granularity, endEpoch )
proportion = float( end - start ) / originalBucketLength
addToNormData( newBucketEpoch, bucketValues, proportion )
newBucketEpoch += granularity
return normData
def _sumToGranularity( self, granularity, bucketsData ):
"""
bucketsData must be a list of lists where each list contains
- field 0: datetime
- field 1: bucketLength
- fields 2-n: numericalFields
"""
normData = self._spanToGranularity( granularity, bucketsData )
for bDate in normData:
del( normData[ bDate ][-1] )
return normData
def _averageToGranularity( self, granularity, bucketsData ):
"""
bucketsData must be a list of lists where each list contains
- field 0: datetime
- field 1: bucketLength
- fields 2-n: numericalFields
"""
normData = self._spanToGranularity( granularity, bucketsData )
for bDate in normData:
for iP in range( len( normData[ bDate ] ) ):
normData[ bDate ][iP] = float( normData[ bDate ][iP] ) / normData[ bDate ][-1]
del( normData[ bDate ][-1] )
return normData
def _convertNoneToZero( self, bucketsData ):
"""
Convert None to 0
bucketsData must be a list of lists where each list contains
- field 0: datetime
- field 1: bucketLength
- fields 2-n: numericalFields
"""
for iPos in range( len( bucketsData ) ):
data = bucketsData[iPos]
for iVal in range( 2, len( data ) ):
if data[ iVal ] == None:
data[ iVal ] = 0
return bucketsData
def _fillWithZero( self, granularity, startEpoch, endEpoch, dataDict ):
"""
Fill missing buckets with zeros
- dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. }
"""
startBucketEpoch = startEpoch - startEpoch % granularity
for key in dataDict:
currentDict = dataDict[ key ]
for timeEpoch in range( int( startBucketEpoch ), int( endEpoch ), granularity ):
if timeEpoch not in currentDict:
currentDict[ timeEpoch ] = 0
return dataDict
def _getAccumulationMaxValue( self, dataDict ):
"""
Sum the values of all keys at the latest time bucket and return that total
- dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. }
"""
maxValue = 0
maxEpoch = 0
for key in dataDict:
currentDict = dataDict[ key ]
for timeEpoch in currentDict:
if timeEpoch > maxEpoch:
maxEpoch = timeEpoch
maxValue = 0
if timeEpoch == maxEpoch:
maxValue += currentDict[ timeEpoch ]
return maxValue
def _getMaxValue( self, dataDict ):
"""
Sum the values of all keys per time bucket and return the maximum total
- dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. }
"""
maxValues = {}
for key in dataDict:
currentDict = dataDict[ key ]
for timeEpoch in currentDict:
if timeEpoch not in maxValues:
maxValues[ timeEpoch ] = 0
maxValues[ timeEpoch ] += currentDict[ timeEpoch ]
maxValue = 0
for k in maxValues:
maxValue = max( maxValue, maxValues[ k ] )
return maxValue
def _divideByFactor( self, dataDict, factor ):
"""
Divide the values by factor and get the maximum value
- dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. }
"""
maxValue = 0.0
for key in dataDict:
currentDict = dataDict[ key ]
for timeEpoch in currentDict:
currentDict[ timeEpoch ] /= float( factor )
maxValue = max( maxValue, currentDict[ timeEpoch ] )
return dataDict, maxValue
def _accumulate( self, granularity, startEpoch, endEpoch, dataDict ):
"""
Accumulate all the values.
- dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. }
"""
startBucketEpoch = startEpoch - startEpoch % granularity
for key in dataDict:
currentDict = dataDict[ key ]
lastValue = 0
for timeEpoch in range( startBucketEpoch, endEpoch, granularity ):
if timeEpoch in currentDict:
lastValue += currentDict[ timeEpoch ]
currentDict[ timeEpoch ] = lastValue
return dataDict
def stripDataField( self, dataDict, fieldId ):
"""
Strip <fieldId> data and sum the rest as if it were data from one key
In:
- dataDict : { 'key' : { <timeEpoch1>: [1, 2, 3],
<timeEpoch2>: [3, 4, 5].. } }
- fieldId : 0
Out
- dataDict : { 'key' : { <timeEpoch1>: 1,
<timeEpoch2>: 3.. } }
- return : [ { <timeEpoch1>: 2, <timeEpoch2>: 4... }
{ <timeEpoch1>: 3, <timeEpoch2>: 5... } ]
"""
remainingData = [{}] #Hack for empty data
for key in dataDict:
for timestamp in dataDict[ key ]:
for iPos in dataDict[ key ][ timestamp ]:
remainingData.append( {} )
break
break
for key in dataDict:
for timestamp in dataDict[ key ]:
strippedField = float( dataDict[ key ][ timestamp ][ fieldId ] )
del( dataDict[ key ][ timestamp ][ fieldId ] )
for iPos in range( len( dataDict[ key ][ timestamp ] ) ):
if timestamp in remainingData[ iPos ]:
remainingData[ iPos ][ timestamp ] += float( dataDict[ key ][ timestamp ][ iPos ] )
else:
remainingData[ iPos ][ timestamp ] = float( dataDict[ key ][ timestamp ][ iPos ] )
dataDict[ key ][ timestamp ] = strippedField
return remainingData
def getKeyValues( self, typeName, condDict ):
"""
Get all valid key values in a type
"""
return self._acDB.getKeyValues( self._setup, typeName, condDict )
def _calculateProportionalGauges( self, dataDict ):
"""
Calculate proportional gauges from a dataDict whose bucket lists contain at least two fields
"""
bucketSums = {}
#Calculate total sums in buckets
for key in dataDict:
for timeKey in dataDict[ key ]:
timeData = dataDict[ key ][ timeKey ]
if len( timeData ) < 2:
raise Exception( "DataDict must be of the type { <key>:{ <timeKey> : [ field1, field2, ..] } }. With at least two fields" )
if timeKey not in bucketSums:
bucketSums[ timeKey ] = [ 0, 0, 0]
bucketSums[ timeKey ][0] += timeData[0]
bucketSums[ timeKey ][1] += timeData[1]
bucketSums[ timeKey ][2] += timeData[0] / timeData[1]
#Calculate proportionalFactor
for timeKey in bucketSums:
timeData = bucketSums[ timeKey ]
if bucketSums[ timeKey ][0] == 0:
bucketSums[ timeKey ] = 0
else:
bucketSums[ timeKey ] = ( timeData[0] / timeData[1] ) / timeData[2]
#Calculate proportional Gauges
for key in dataDict:
for timeKey in dataDict[ key ]:
timeData = dataDict[ key ][ timeKey ]
dataDict[ key ][ timeKey ] = [ ( timeData[0] / timeData[1] ) * bucketSums[ timeKey ] ]
return dataDict
def _getBucketTotals( self, dataDict ):
"""
Sum key data and get totals for each bucket
"""
newData = {}
for k in dataDict:
for bt in dataDict[ k ]:
if bt not in newData:
newData[ bt ] = 0.0
newData[ bt ] += dataDict[ k ][ bt ]
return newData
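# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): an illustration of
# the bucketsData layout documented in _spanToGranularity, i.e. rows of
# [ bucketStartEpoch, bucketLength, value, ... ]. The helper does not touch
# the accounting DB, so a dummy handle and setup name are passed in.
def exampleSpan():
  utils = DBUtils( None, "DummySetup" )
  buckets = [ [ 0, 3600, 10.0 ],      # one 1h bucket worth 10
              [ 3600, 7200, 20.0 ] ]  # one 2h bucket worth 20
  # re-span onto 1h buckets; the 2h bucket is shared proportionally
  return utils._sumToGranularity( 3600, buckets )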
repo_name: vmendez/DIRAC | path: AccountingSystem/private/DBUtils.py | language: Python | license: gpl-3.0 | size: 11,695 | keyword: ["DIRAC"] | text_hash: 97ace447fceddfc81a0928145d15e3f00025a49675b742d16f918abedf12b529
#########################################################################################
# LSF.py
# 10.11.2014
# Author: A.T.
#########################################################################################
""" LSF.py is a DIRAC independent class representing LSF batch system.
LSF objects are used as backend batch system representation for
LocalComputingElement and SSHComputingElement classes
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import re
import subprocess
import shlex
import os
__RCSID__ = "$Id$"
class LSF(object):
def submitJob(self, **kwargs):
"""Submit nJobs to the condor batch system"""
resultDict = {}
MANDATORY_PARAMETERS = ["Executable", "OutputDir", "ErrorDir", "WorkDir", "SubmitOptions", "Queue"]
for argument in MANDATORY_PARAMETERS:
if argument not in kwargs:
resultDict["Status"] = -1
resultDict["Message"] = "No %s" % argument
return resultDict
nJobs = kwargs.get("NJobs", 1)
preamble = kwargs.get("Preamble")
outputs = []
outputDir = kwargs["OutputDir"]
errorDir = kwargs["ErrorDir"]
executable = kwargs["Executable"]
queue = kwargs["Queue"]
submitOptions = kwargs["SubmitOptions"]
outputDir = os.path.expandvars(outputDir)
errorDir = os.path.expandvars(errorDir)
executable = os.path.expandvars(executable)
for _i in range(int(nJobs)):
cmd = "%s; " % preamble if preamble else ""
cmd += "bsub -o %s -e %s -q %s -J DIRACPilot %s %s" % (
outputDir,
errorDir,
queue,
submitOptions,
executable,
)
sp = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
output, error = sp.communicate()
status = sp.returncode
if status == 0:
outputs.append(output)
else:
break
if outputs:
resultDict["Status"] = 0
resultDict["Jobs"] = []
for output in outputs:
match = re.search(r"Job <(\d*)>", output)
if match:
resultDict["Jobs"].append(match.groups()[0])
else:
resultDict["Status"] = status
resultDict["Message"] = error
return resultDict
def killJob(self, **kwargs):
"""Kill jobs in the given list"""
resultDict = {}
MANDATORY_PARAMETERS = ["JobIDList"]
for argument in MANDATORY_PARAMETERS:
if argument not in kwargs:
resultDict["Status"] = -1
resultDict["Message"] = "No %s" % argument
return resultDict
jobIDList = kwargs.get("JobIDList")
if not jobIDList:
resultDict["Status"] = -1
resultDict["Message"] = "Empty job list"
return resultDict
successful = []
failed = []
errors = ""
for job in jobIDList:
sp = subprocess.Popen(
shlex.split("bkill %s" % job),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
output, error = sp.communicate()
status = sp.returncode
if status != 0:
failed.append(job)
errors += error
else:
successful.append(job)
resultDict["Status"] = 0
if failed:
resultDict["Status"] = 1
resultDict["Message"] = error
resultDict["Successful"] = successful
resultDict["Failed"] = failed
return resultDict
def getCEStatus(self, **kwargs):
"""Method to return information on running and pending jobs."""
resultDict = {}
MANDATORY_PARAMETERS = ["Queue"]
for argument in MANDATORY_PARAMETERS:
if argument not in kwargs:
resultDict["Status"] = -1
resultDict["Message"] = "No %s" % argument
return resultDict
queue = kwargs["Queue"]
cmd = "bjobs -q %s -a" % queue
sp = subprocess.Popen(
shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
output, error = sp.communicate()
status = sp.returncode
if status != 0:
resultDict["Status"] = status
resultDict["Message"] = error
return resultDict
waitingJobs = 0
runningJobs = 0
lines = output.split("\n")
for line in lines:
if line.count("PEND") or line.count("PSUSP"):
waitingJobs += 1
if line.count("RUN") or line.count("USUSP"):
runningJobs += 1
# Final output
resultDict["Status"] = 0
resultDict["Waiting"] = waitingJobs
resultDict["Running"] = runningJobs
return resultDict
def getJobStatus(self, **kwargs):
"""Get the status information for the given list of jobs"""
resultDict = {}
MANDATORY_PARAMETERS = ["JobIDList"]
for argument in MANDATORY_PARAMETERS:
if argument not in kwargs:
resultDict["Status"] = -1
resultDict["Message"] = "No %s" % argument
return resultDict
jobIDList = kwargs["JobIDList"]
if not jobIDList:
resultDict["Status"] = -1
resultDict["Message"] = "Empty job list"
return resultDict
cmd = "bjobs " + " ".join(jobIDList)
sp = subprocess.Popen(
shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
output, error = sp.communicate()
status = sp.returncode
if status != 0:
resultDict["Status"] = status
resultDict["Message"] = error
return resultDict
output = output.replace("\r", "")
lines = output.split("\n")
statusDict = {}
for job in jobIDList:
statusDict[job] = "Unknown"
for line in lines:
if line.find(job) != -1:
if line.find("UNKWN") != -1:
statusDict[job] = "Unknown"
else:
lsfStatus = line.split()[2]
if lsfStatus in ["DONE", "EXIT"]:
statusDict[job] = "Done"
elif lsfStatus in ["RUN", "SSUSP"]:
statusDict[job] = "Running"
elif lsfStatus in ["PEND", "PSUSP"]:
statusDict[job] = "Waiting"
# Final output
status = 0
resultDict["Status"] = 0
resultDict["Jobs"] = statusDict
return resultDict
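if __name__ == "__main__":
    # Minimal usage sketch, not part of DIRAC itself: running it requires a
    # working LSF installation (bsub/bjobs/bkill on the PATH). The paths and
    # the queue name below are hypothetical placeholders.
    lsf = LSF()
    submission = lsf.submitJob(
        Executable="/path/to/pilot/script.sh",
        OutputDir="/tmp/lsf-output",
        ErrorDir="/tmp/lsf-error",
        WorkDir="/tmp/lsf-work",
        SubmitOptions="",
        Queue="default_queue",
        NJobs=1,
    )
    print(submission)
    if submission["Status"] == 0 and submission["Jobs"]:
        print(lsf.getJobStatus(JobIDList=submission["Jobs"]))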
|
ic-hep/DIRAC
|
src/DIRAC/Resources/Computing/BatchSystems/LSF.py
|
Python
|
gpl-3.0
| 7,238
|
[
"DIRAC"
] |
d4a46abd4524c59f96d1cdd59274fd8ef1787bd86d9b3f1ebddf4c9b752b4b00
|
# -*- coding: utf-8 -*-
""":mod:`itertools` is full of great examples of Python generator
usage. However, there are still some critical gaps. ``iterutils``
fills many of those gaps with featureful, tested, and Pythonic
solutions.
Many of the functions below have two versions, one which
returns an iterator (denoted by the ``*_iter`` naming pattern), and a
shorter-named convenience form that returns a list. Some of the
following are based on examples in itertools docs.
"""
__all__ = ['is_iterable', 'is_scalar', 'split', 'split_iter', 'chunked',
'chunked_iter', 'windowed', 'windowed_iter', 'bucketize',
'partition', 'unique', 'unique_iter', 'one', 'first']
import math
import itertools
import random
try:  # the container ABCs moved to collections.abc in Python 3.3+
    from collections.abc import Mapping, Sequence, Set, ItemsView
except ImportError:  # Python 2 fallback
    from collections import Mapping, Sequence, Set, ItemsView
try:
from typeutils import make_sentinel
_UNSET = make_sentinel('_UNSET')
_REMAP_EXIT = make_sentinel('_REMAP_EXIT')
except ImportError:
_REMAP_EXIT = object()
_UNSET = object()
try:
from itertools import izip
except ImportError:
# Python 3 compat
basestring = (str, bytes)
izip, xrange = zip, range
def is_iterable(obj):
"""Similar in nature to :func:`callable`, ``is_iterable`` returns
``True`` if an object is `iterable`_, ``False`` if not.
>>> is_iterable([])
True
>>> is_iterable(object())
False
.. _iterable: https://docs.python.org/2/glossary.html#term-iterable
"""
try:
iter(obj)
except TypeError:
return False
return True
def is_scalar(obj):
"""A near-mirror of :func:`is_iterable`. Returns ``False`` if an
object is an iterable container type. Strings are considered
scalar as well, because strings are more often treated as whole
values as opposed to iterables of 1-character substrings.
>>> is_scalar(object())
True
>>> is_scalar(range(10))
False
>>> is_scalar('hello')
True
"""
return not is_iterable(obj) or isinstance(obj, basestring)
def is_collection(obj):
"""The opposite of :func:`is_scalar`. Returns ``True`` if an object
is an iterable other than a string.
>>> is_collection(object())
False
>>> is_collection(range(10))
True
>>> is_collection('hello')
False
"""
return is_iterable(obj) and not isinstance(obj, basestring)
def split(src, sep=None, maxsplit=None):
"""Splits an iterable based on a separator. Like :meth:`str.split`,
but for all iterables. Returns a list of lists.
>>> split(['hi', 'hello', None, None, 'sup', None, 'soap', None])
[['hi', 'hello'], ['sup'], ['soap']]
See :func:`split_iter` docs for more info.
"""
return list(split_iter(src, sep, maxsplit))
def split_iter(src, sep=None, maxsplit=None):
"""Splits an iterable based on a separator, *sep*, a max of
*maxsplit* times (no max by default). *sep* can be:
* a single value
* an iterable of separators
* a single-argument callable that returns True when a separator is
encountered
``split_iter()`` yields lists of non-separator values. A separator will
never appear in the output.
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None, 'soap', None]))
[['hi', 'hello'], ['sup'], ['soap']]
Note that ``split_iter`` is based on :func:`str.split`, so if
*sep* is ``None``, ``split()`` **groups** separators. If empty lists
are desired between two contiguous ``None`` values, simply use
``sep=[None]``:
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None]))
[['hi', 'hello'], ['sup']]
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None], sep=[None]))
[['hi', 'hello'], [], ['sup'], []]
Using a callable separator:
>>> falsy_sep = lambda x: not x
>>> list(split_iter(['hi', 'hello', None, '', 'sup', False], falsy_sep))
[['hi', 'hello'], [], ['sup'], []]
See :func:`split` for a list-returning version.
"""
if not is_iterable(src):
raise TypeError('expected an iterable')
if maxsplit is not None:
maxsplit = int(maxsplit)
if maxsplit == 0:
yield [src]
return
if callable(sep):
sep_func = sep
elif not is_scalar(sep):
sep = frozenset(sep)
sep_func = lambda x: x in sep
else:
sep_func = lambda x: x == sep
cur_group = []
split_count = 0
for s in src:
if maxsplit is not None and split_count >= maxsplit:
sep_func = lambda x: False
if sep_func(s):
if sep is None and not cur_group:
# If sep is none, str.split() "groups" separators
# check the str.split() docs for more info
continue
split_count += 1
yield cur_group
cur_group = []
else:
cur_group.append(s)
if cur_group or sep is not None:
yield cur_group
return
def chunked(src, size, count=None, **kw):
"""Returns a list of *count* chunks, each with *size* elements,
generated from iterable *src*. If *src* is not evenly divisible by
*size*, the final chunk will have fewer than *size* elements.
Provide the *fill* keyword argument to provide a pad value and
enable padding, otherwise no padding will take place.
>>> chunked(range(10), 3)
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> chunked(range(10), 3, fill=None)
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
>>> chunked(range(10), 3, count=2)
[[0, 1, 2], [3, 4, 5]]
See :func:`chunked_iter` for more info.
"""
chunk_iter = chunked_iter(src, size, **kw)
if count is None:
return list(chunk_iter)
else:
return list(itertools.islice(chunk_iter, count))
def chunked_iter(src, size, **kw):
"""Generates *size*-sized chunks from *src* iterable. Unless the
optional *fill* keyword argument is provided, iterables not even
divisible by *size* will have a final chunk that is smaller than
*size*.
>>> list(chunked_iter(range(10), 3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(chunked_iter(range(10), 3, fill=None))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
Note that ``fill=None`` in fact uses ``None`` as the fill value.
"""
# TODO: add count kwarg?
if not is_iterable(src):
raise TypeError('expected an iterable')
size = int(size)
if size <= 0:
raise ValueError('expected a positive integer chunk size')
do_fill = True
try:
fill_val = kw.pop('fill')
except KeyError:
do_fill = False
fill_val = None
if kw:
raise ValueError('got unexpected keyword arguments: %r' % kw.keys())
if not src:
return
postprocess = lambda chk: chk
if isinstance(src, basestring):
postprocess = lambda chk, _sep=type(src)(): _sep.join(chk)
cur_chunk = []
i = 0
for item in src:
cur_chunk.append(item)
i += 1
if i % size == 0:
yield postprocess(cur_chunk)
cur_chunk = []
if cur_chunk:
if do_fill:
lc = len(cur_chunk)
cur_chunk[lc:] = [fill_val] * (size - lc)
yield postprocess(cur_chunk)
return
def pairwise(src, count=None, **kw):
"""Convenience function for calling :func:`chunked` with *size* set to
2.
"""
return chunked(src, 2, count, **kw)
def pairwise_iter(src, **kw):
"""Convenience function for calling :func:`chunked_iter` with *size*
set to 2.
"""
return chunked_iter(src, 2, **kw)
def windowed(src, size):
"""Returns tuples with exactly length *size*. If the iterable is
too short to make a window of length *size*, no tuples are
returned. See :func:`windowed_iter` for more.
"""
return list(windowed_iter(src, size))
def windowed_iter(src, size):
"""Returns tuples with length *size* which represent a sliding
window over iterable *src*.
>>> list(windowed_iter(range(7), 3))
[(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)]
If the iterable is too short to make a window of length *size*,
then no window tuples are returned.
>>> list(windowed_iter(range(3), 5))
[]
"""
# TODO: lists? (for consistency)
tees = itertools.tee(src, size)
try:
for i, t in enumerate(tees):
for _ in xrange(i):
next(t)
except StopIteration:
return izip([])
return izip(*tees)
def xfrange(stop, start=None, step=1.0):
"""Same as :func:`frange`, but generator-based instead of returning a
list.
>>> tuple(xfrange(1, 3, step=0.75))
(1.0, 1.75, 2.5)
See :func:`frange` for more details.
"""
if not step:
raise ValueError('step must be non-zero')
if start is None:
start, stop = 0.0, stop * 1.0
else:
# swap when all args are used
stop, start = start * 1.0, stop * 1.0
cur = start
while cur < stop:
yield cur
cur += step
def frange(stop, start=None, step=1.0):
"""A :func:`range` clone for float-based ranges.
>>> frange(5)
[0.0, 1.0, 2.0, 3.0, 4.0]
>>> frange(6, step=1.25)
[0.0, 1.25, 2.5, 3.75, 5.0]
>>> frange(100.5, 101.5, 0.25)
[100.5, 100.75, 101.0, 101.25]
>>> frange(5, 0)
[]
>>> frange(5, 0, step=-1.25)
[5.0, 3.75, 2.5, 1.25]
"""
if not step:
raise ValueError('step must be non-zero')
if start is None:
start, stop = 0.0, stop * 1.0
else:
# swap when all args are used
stop, start = start * 1.0, stop * 1.0
count = int(math.ceil((stop - start) / step))
ret = [None] * count
if not ret:
return ret
ret[0] = start
for i in xrange(1, count):
ret[i] = ret[i - 1] + step
return ret
def backoff(start, stop, count=None, factor=2, jitter=lambda x:x):
"""Returns a list of geometrically-increasing floating-point numbers,
suitable for usage with `exponential backoff`_. Exactly like
:func:`backoff_iter`, but without the ``'repeat'`` option for
*count*. See :func:`backoff_iter` for more details.
.. _exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff
>>> backoff(1, 10)
[1.0, 2.0, 4.0, 8.0, 10.0]
"""
if count == 'repeat':
raise ValueError("'repeat' supported in backoff_iter, not backoff")
return list(backoff_iter(start, stop, count=count, factor=factor, jitter=jitter))
def backoff_iter(start, stop, count=None, factor=2, jitter=lambda x:x):
"""Generates a sequence of geometrically-increasing floats, suitable
for usage with `exponential backoff`_. Starts with *start*,
increasing by *factor* until *stop* is reached, optionally
stopping iteration once *count* numbers are yielded. *factor*
defaults to 2. In general retrying with properly-configured
backoff creates a better-behaved component for a larger service
ecosystem.
.. _exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff
>>> list(backoff_iter(1.0, 10.0, count=5))
[1.0, 2.0, 4.0, 8.0, 10.0]
>>> list(backoff_iter(1.0, 10.0, count=8))
[1.0, 2.0, 4.0, 8.0, 10.0, 10.0, 10.0, 10.0]
>>> list(backoff_iter(0.25, 100.0, factor=10))
[0.25, 2.5, 25.0, 100.0]
A simplified usage example:
.. code-block:: python
for timeout in backoff_iter(0.25, 5.0):
try:
res = network_call()
break
except Exception as e:
log(e)
time.sleep(timeout)
An enhancement for large-scale systems would be to add variation
("jitter") to the timeout value. This is done to avoid a
thundering herd on the receiving end of the network call.
Finally, for *count*, the special value ``'repeat'`` can be passed to
continue yielding indefinitely.
"""
    if start <= 0:
        raise ValueError('start must be positive, not %r' % start)
if not start < (start * factor):
raise ValueError('start * factor should be greater than start')
stop = float(stop)
if count is None:
count = 1 + math.ceil(math.log(stop/start, factor))
if count != 'repeat' and count < 0:
raise ValueError('count must be greater than 0, not %r' % count)
cur, i = float(start), 0
while count == 'repeat' or i < count:
yield jitter(cur)
i += 1
if cur < stop:
cur *= factor
if cur > stop:
cur = stop
return
def full_jitter(cur, base=0):
"""Pick a random float between base and the current backoff value.
"""
return random.uniform(base, cur)
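# Usage sketch (illustrative): pairing backoff_iter() with full_jitter adds
# randomized jitter to every retry delay, which helps avoid a thundering herd
# of synchronized retries:
#
#     for timeout in backoff_iter(0.25, 8.0, jitter=full_jitter):
#         # ... attempt the operation, time.sleep(timeout) on failure ...
#         pass
#
# full_jitter picks a uniform value between 0 and the current backoff value.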
def bucketize(src, key=None):
"""Group values in the *src* iterable by the value returned by *key*,
which defaults to :class:`bool`, grouping values by
truthiness.
>>> bucketize(range(5))
{False: [0], True: [1, 2, 3, 4]}
>>> is_odd = lambda x: x % 2 == 1
>>> bucketize(range(5), is_odd)
{False: [0, 2, 4], True: [1, 3]}
Value lists are not deduplicated:
>>> bucketize([None, None, None, 'hello'])
{False: [None, None, None], True: ['hello']}
Note in these examples there were at most two keys, ``True`` and
``False``, and each key present has a list with at least one
item. See :func:`partition` for a version specialized for binary
use cases.
"""
if not is_iterable(src):
raise TypeError('expected an iterable')
if key is None:
key = bool
if not callable(key):
raise TypeError('expected callable key function')
ret = {}
for val in src:
keyval = key(val)
ret.setdefault(keyval, []).append(val)
return ret
def partition(src, key=None):
"""No relation to :meth:`str.partition`, ``partition`` is like
:func:`bucketize`, but for added convenience returns a tuple of
``(truthy_values, falsy_values)``.
>>> nonempty, empty = partition(['', '', 'hi', '', 'bye'])
>>> nonempty
['hi', 'bye']
*key* defaults to :class:`bool`, but can be carefully overridden to
use any function that returns either ``True`` or ``False``.
>>> import string
>>> is_digit = lambda x: x in string.digits
>>> decimal_digits, hexletters = partition(string.hexdigits, is_digit)
>>> ''.join(decimal_digits), ''.join(hexletters)
('0123456789', 'abcdefABCDEF')
"""
bucketized = bucketize(src, key)
return bucketized.get(True, []), bucketized.get(False, [])
def unique(src, key=None):
"""``unique()`` returns a list of unique values, as determined by
*key*, in the order they first appeared in the input iterable,
*src*.
>>> ones_n_zeros = '11010110001010010101010'
>>> ''.join(unique(ones_n_zeros))
'10'
See :func:`unique_iter` docs for more details.
"""
return list(unique_iter(src, key))
def unique_iter(src, key=None):
"""Yield unique elements from the iterable, *src*, based on *key*,
in the order in which they first appeared in *src*.
>>> repetitious = [1, 2, 3] * 10
>>> list(unique_iter(repetitious))
[1, 2, 3]
By default, *key* is the object itself, but *key* can either be a
callable or, for convenience, a string name of the attribute on
which to uniqueify objects, falling back on identity when the
attribute is not present.
>>> pleasantries = ['hi', 'hello', 'ok', 'bye', 'yes']
>>> list(unique_iter(pleasantries, key=lambda x: len(x)))
['hi', 'hello', 'bye']
"""
if not is_iterable(src):
raise TypeError('expected an iterable, not %r' % type(src))
if key is None:
key_func = lambda x: x
elif callable(key):
key_func = key
elif isinstance(key, basestring):
key_func = lambda x: getattr(x, key, x)
else:
raise TypeError('"key" expected a string or callable, not %r' % key)
seen = set()
for i in src:
k = key_func(i)
if k not in seen:
seen.add(k)
yield i
return
def one(src, default=None, key=None):
"""Along the same lines as builtins, :func:`all` and :func:`any`, and
similar to :func:`first`, ``one()`` returns the single object in
the given iterable *src* that evaluates to ``True``, as determined
by callable *key*. If unset, *key* defaults to :class:`bool`. If
    no such objects are found, *default* is returned. If *default* is
    not passed, ``None`` is returned.
    If *src* has more than one object that evaluates to ``True``,
    *default* is returned as well, making ``one()`` behave like an
    `XOR`_ over an iterable.
>>> one((True, False, False))
True
>>> one((True, False, True))
>>> one((0, 0, 'a'))
'a'
>>> one((0, False, None))
>>> one((True, True), default=False)
False
>>> bool(one(('', 1)))
True
>>> one((10, 20, 30, 42), key=lambda i: i > 40)
42
See `Martín Gaitán's original repo`_ for further use cases.
.. _Martín Gaitán's original repo: https://github.com/mgaitan/one
.. _XOR: https://en.wikipedia.org/wiki/Exclusive_or
"""
the_one = default
for i in src:
if key(i) if key else i:
if the_one:
return default
the_one = i
return the_one
def first(iterable, default=None, key=None):
"""Return first element of *iterable* that evaluates to ``True``, else
return ``None`` or optional *default*. Similar to :func:`one`.
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional *key* argument specifies a one-argument predicate function
like that used for *filter()*. The *key* argument, if supplied, should be
in keyword form. For example, finding the first even number in an iterable:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
Contributed by Hynek Schlawack, author of `the original standalone module`_.
.. _the original standalone module: https://github.com/hynek/first
"""
if key is None:
for el in iterable:
if el:
return el
else:
for el in iterable:
if key(el):
return el
return default
def default_visit(path, key, value):
# print('visit(%r, %r, %r)' % (path, key, value))
return key, value
# enable the extreme: monkeypatching iterutils with a different default_visit
_orig_default_visit = default_visit
def default_enter(path, key, value):
# print('enter(%r, %r)' % (key, value))
try:
iter(value)
except TypeError:
return value, False
if isinstance(value, basestring):
return value, False
elif isinstance(value, Mapping):
return value.__class__(), ItemsView(value)
elif isinstance(value, Sequence):
return value.__class__(), enumerate(value)
elif isinstance(value, Set):
return value.__class__(), enumerate(value)
return value, False
def default_exit(path, key, old_parent, new_parent, new_items):
# print('exit(%r, %r, %r, %r, %r)'
# % (path, key, old_parent, new_parent, new_items))
ret = new_parent
if isinstance(new_parent, Mapping):
new_parent.update(new_items)
elif isinstance(new_parent, Sequence):
vals = [v for i, v in new_items]
try:
new_parent.extend(vals)
except AttributeError:
ret = new_parent.__class__(vals) # tuples
elif isinstance(new_parent, Set):
vals = [v for i, v in new_items]
try:
new_parent.update(new_items)
except AttributeError:
ret = new_parent.__class__(vals) # frozensets
else:
raise RuntimeError('unexpected iterable type: %r' % type(new_parent))
return ret
def remap(root, visit=default_visit, enter=default_enter, exit=default_exit,
**kwargs):
"""The remap ("recursive map") function is used to traverse and
transform nested structures. Lists, tuples, sets, and dictionaries
are just a few of the data structures commonly nested into
    heterogeneous tree-like structures that are ubiquitous in
programming. Unfortunately, Python's built-in ways to compactly
manipulate collections are flat. For instance, list comprehensions
may be fast and succinct, but they don't recurse, making it
tedious to quickly apply changes to real-world data.
Here's an example of removing all None-valued items from the data:
>>> from pprint import pprint
>>> reviews = {'Star Trek': {'TNG': 10, 'DS9': 8.5, 'ENT': None},
... 'Babylon 5': 6, 'Dr. Who': None}
>>> pprint(remap(reviews, lambda p, k, v: v is not None))
{'Babylon 5': 6, 'Star Trek': {'DS9': 8.5, 'TNG': 10}}
Notice how both Nones have been removed despite the nesting in the
dictionary. Not bad for a one-liner, and that's just the beginning.
See `this remap cookbook`_ for more delicious recipes.
.. _this remap cookbook: http://sedimental.org/remap.html
remap takes four main arguments: the object to traverse and three
optional callables which determine how the remapped object will be
created.
Args:
root: The target object to traverse. By default, remap
supports iterables like :class:`list`, :class:`tuple`,
:class:`dict`, and :class:`set`, but any object traversable by
*enter* will work.
visit (callable): This function is called on every item in
*root*. It must accept three positional arguments, *path*,
*key*, and *value*. *path* is simply a tuple of parents'
keys. *visit* should return the new key-value pair. It may
also return ``True`` as shorthand to keep the old item
unmodified, or ``False`` to drop the item from the new
structure. *visit* is called after *enter*, on the new parent.
The *visit* function is called for every item in root,
including duplicate items. For traversable values, it is
called on the new parent object, after all its children
have been visited. The default visit behavior simply
returns the key-value pair unmodified.
enter (callable): This function controls which items in *root*
are traversed. It accepts the same arguments as *visit*: the
path, the key, and the value of the current item. It returns a
pair of the blank new parent, and an iterator over the items
which should be visited. If ``False`` is returned instead of
an iterator, the value will not be traversed.
The *enter* function is only called once per unique value. The
            default enter behavior supports mappings, sequences, and
sets. Strings and all other iterables will not be traversed.
exit (callable): This function determines how to handle items
once they have been visited. It gets the same three
arguments as the other functions -- *path*, *key*, *value*
-- plus two more: the blank new parent object returned
from *enter*, and a list of the new items, as remapped by
*visit*.
Like *enter*, the *exit* function is only called once per
unique value. The default exit behavior is to simply add
all new items to the new parent, e.g., using
:meth:`list.extend` and :meth:`dict.update` to add to the
new parent. Immutable objects, such as a :class:`tuple` or
:class:`namedtuple`, must be recreated from scratch, but
use the same type as the new parent passed back from the
*enter* function.
reraise_visit (bool): A pragmatic convenience for the *visit*
callable. When set to ``False``, remap ignores any errors
raised by the *visit* callback. Items causing exceptions
are kept. See examples for more details.
remap is designed to cover the majority of cases with just the
*visit* callable. While passing in multiple callables is very
empowering, remap is designed so very few cases should require
passing more than one function.
When passing *enter* and *exit*, it's common and easiest to build
on the default behavior. Simply add ``from boltons.iterutils import
default_enter`` (or ``default_exit``), and have your enter/exit
function call the default behavior before or after your custom
logic. See `this example`_.
Duplicate and self-referential objects (aka reference loops) are
automatically handled internally, `as shown here`_.
.. _this example: http://sedimental.org/remap.html#sort_all_lists
.. _as shown here: http://sedimental.org/remap.html#corner_cases
"""
# TODO: improve argument formatting in sphinx doc
# TODO: enter() return (False, items) to continue traverse but cancel copy?
if not callable(visit):
raise TypeError('visit expected callable, not: %r' % visit)
if not callable(enter):
raise TypeError('enter expected callable, not: %r' % enter)
if not callable(exit):
raise TypeError('exit expected callable, not: %r' % exit)
reraise_visit = kwargs.pop('reraise_visit', True)
if kwargs:
raise TypeError('unexpected keyword arguments: %r' % kwargs.keys())
path, registry, stack = (), {}, [(None, root)]
new_items_stack = []
while stack:
key, value = stack.pop()
id_value = id(value)
if key is _REMAP_EXIT:
key, new_parent, old_parent = value
id_value = id(old_parent)
path, new_items = new_items_stack.pop()
value = exit(path, key, old_parent, new_parent, new_items)
registry[id_value] = value
if not new_items_stack:
continue
elif id_value in registry:
value = registry[id_value]
else:
res = enter(path, key, value)
try:
new_parent, new_items = res
except TypeError:
# TODO: handle False?
raise TypeError('enter should return a tuple of (new_parent,'
' items_iterator), not: %r' % res)
if new_items is not False:
# traverse unless False is explicitly passed
registry[id_value] = new_parent
new_items_stack.append((path, []))
if value is not root:
path += (key,)
stack.append((_REMAP_EXIT, (key, new_parent, value)))
if new_items:
stack.extend(reversed(list(new_items)))
continue
if visit is _orig_default_visit:
# avoid function call overhead by inlining identity operation
visited_item = (key, value)
else:
try:
visited_item = visit(path, key, value)
except:
if reraise_visit:
raise
visited_item = True
if visited_item is False:
continue # drop
elif visited_item is True:
visited_item = (key, value)
# TODO: typecheck?
# raise TypeError('expected (key, value) from visit(),'
# ' not: %r' % visited_item)
try:
new_items_stack[-1][1].append(visited_item)
except IndexError:
raise TypeError('expected remappable root, not: %r' % root)
return value
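# A small sketch of building on the default callbacks (the names below are
# purely illustrative): an *enter* hook that keeps, but refuses to descend
# into, any mapping containing a '_private' key, deferring everything else to
# default_enter:
#
#     def guarded_enter(path, key, value):
#         new_parent, items = default_enter(path, key, value)
#         if isinstance(value, Mapping) and '_private' in value:
#             return value, False  # keep the value, do not traverse it
#         return new_parent, items
#
#     remap(data, enter=guarded_enter)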
class PathAccessError(KeyError, IndexError, TypeError):
# TODO: could maybe get fancy with an isinstance
def __init__(self, exc, seg, path):
self.exc = exc
self.seg = seg
self.path = path
def __repr__(self):
cn = self.__class__.__name__
return '%s(%r, %r, %r)' % (cn, self.exc, self.seg, self.path)
def __str__(self):
return ('could not access %r from path %r, got error: %r'
% (self.seg, self.path, self.exc))
def get_path(root, path, default=_UNSET):
"""EAFP is great, but the error message on this isn't:
var_key = 'last_key'
x['key'][-1]['other_key'][var_key]
One of get_path's chief aims is to have a good exception that is
better than a plain old KeyError: 'missing_key'
"""
# TODO: integrate default
# TODO: listify kwarg? to allow indexing into sets
# TODO: raise better error on not iterable?
try:
path = path.split('.')
except AttributeError:
pass
cur = root
for seg in path:
try:
cur = cur[seg]
except (KeyError, IndexError) as exc:
raise PathAccessError(exc, seg, path)
except TypeError:
# either string index in a list, or a parent that
# doesn't support indexing
try:
seg = int(seg)
cur = cur[seg]
except (KeyError, IndexError, TypeError):
raise PathAccessError(exc, seg, path)
return cur
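# Usage sketch (illustrative values):
#
#     root = {'a': [{'b': {'c': 1}}]}
#     get_path(root, 'a.0.b.c')            # -> 1
#     get_path(root, ('a', 0, 'b', 'c'))   # -> 1
#     get_path(root, 'a.0.b.missing')      # raises PathAccessError
#
# Note that the *default* argument is not yet wired up (see the TODO above).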
# TODO: get_path/set_path
# TODO: recollect()
# TODO: reiter()
"""
May actually be faster to do an isinstance check for a str path
$ python -m timeit -s "x = [1]" "x[0]"
10000000 loops, best of 3: 0.0207 usec per loop
$ python -m timeit -s "x = [1]" "try: x[0] \nexcept: pass"
10000000 loops, best of 3: 0.029 usec per loop
$ python -m timeit -s "x = [1]" "try: x[1] \nexcept: pass"
1000000 loops, best of 3: 0.315 usec per loop
# setting up try/except is fast, only around 0.01us
# actually triggering the exception takes almost 10x as long
$ python -m timeit -s "x = [1]" "isinstance(x, basestring)"
10000000 loops, best of 3: 0.141 usec per loop
$ python -m timeit -s "x = [1]" "isinstance(x, str)"
10000000 loops, best of 3: 0.131 usec per loop
$ python -m timeit -s "x = [1]" "try: x.split('.')\n except: pass"
1000000 loops, best of 3: 0.443 usec per loop
$ python -m timeit -s "x = [1]" "try: x.split('.') \nexcept AttributeError: pass"
1000000 loops, best of 3: 0.544 usec per loop
"""
|
suranap/boltons
|
boltons/iterutils.py
|
Python
|
bsd-3-clause
| 30,327
|
[
"VisIt"
] |
540328c64406f3601c411c223b2db2a3a4e28721e1e3ec3504a431e330c646b4
|
import _ast
import ast
from jaspyx.context.block import BlockContext
from jaspyx.visitor import BaseVisitor
class For(BaseVisitor):
def visit_For(self, node):
if node.orelse:
raise Exception('for-else is not supported.')
if isinstance(node.iter, _ast.Call) and isinstance(node.iter.func, _ast.Name) and \
node.iter.func.id == 'range':
if len(node.iter.args) == 1:
start = ast.Num(0)
stop = node.iter.args[0]
step = ast.Num(1)
cmp_op = ast.Lt()
elif len(node.iter.args) == 2:
start = node.iter.args[0]
stop = node.iter.args[1]
step = ast.Num(1)
cmp_op = ast.Lt()
elif len(node.iter.args) == 3:
start = node.iter.args[0]
stop = node.iter.args[1]
step = node.iter.args[2]
if not isinstance(step, _ast.Num):
raise Exception('range() only supports literal numeric step')
if step.n >= 0:
cmp_op = ast.Lt()
else:
cmp_op = ast.Gt()
else:
raise Exception('range() expects 1, 2 or 3 parameters')
self.indent()
self.output('for(')
self.visit(node.target)
self.output(' = ')
self.visit(start)
self.output('; ')
self.visit(
ast.Compare(
node.target,
[cmp_op],
[stop]
)
)
self.output('; ')
self.visit(node.target)
self.output(' += ')
self.visit(step)
self.output(') ')
else:
self.indent()
self.output('for(')
self.visit(node.target)
self.output(' in ')
self.visit(node.iter)
self.output(') ')
self.block(node.body, context=BlockContext(self.stack[-1]))
self.output('\n')
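# Rough sketch of the emitted JavaScript (exact formatting depends on the
# other visitors in the pipeline): ``for i in range(3): ...`` becomes roughly
#
#     for(i = 0; i < 3; i += 1) { ... }
#
# while any other iterable, e.g. ``for x in seq: ...``, falls back to
# JavaScript's for-in form:
#
#     for(x in seq) { ... }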
|
iksteen/jaspyx
|
jaspyx/visitor/for_.py
|
Python
|
mit
| 2,102
|
[
"VisIt"
] |
b94473a2b076fd77f7c7fe058f34d12f6cc6c4966c029bff5f460dc2112c67c9
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2013 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtUiTools import *
import logging
logger = logging.getLogger('remindor_qt')
import gettext
from gettext import gettext as _
gettext.textdomain('remindor-common')
from remindor_qt import helpers
from remindor_common.helpers import DateDialogInfo
class DateDialog(QDialog):
update = Signal(str)
def __init__(self, date_s = "", parent = None):
super(DateDialog, self).__init__(parent)
helpers.setup_ui(self, "DateDialog.ui")
self.info = DateDialogInfo(date_s, helpers.database_file())
self.cancel_button = self.findChild(QPushButton, "cancel_button")
self.ok_button = self.findChild(QPushButton, "ok_button")
self.date_label = self.findChild(QLabel, "date_label")
self.date_combo = self.findChild(QComboBox, "date_combo")
self.on_label = self.findChild(QLabel, "on_label")
self.on_combo = self.findChild(QComboBox, "on_combo")
self.on_date = self.findChild(QDateEdit, "on_date")
self.every_label = self.findChild(QLabel, "every_label")
self.every_combo = self.findChild(QComboBox, "every_combo")
self.every_label2 = self.findChild(QLabel, "every_label2")
self.every_spin = self.findChild(QSpinBox, "every_spin")
self.days_label = self.findChild(QLabel, "days_label")
self.from_label = self.findChild(QLabel, "from_label")
self.from_date = self.findChild(QDateEdit, "from_date")
self.from_check = self.findChild(QCheckBox, "from_check")
self.to_label = self.findChild(QLabel, "to_label")
self.to_date = self.findChild(QDateEdit, "to_date")
self.error_label = self.findChild(QLabel, "error_label")
self.error_label.hide()
self.translate()
self.on_date.setDisplayFormat(self.info.qt_date_format)
self.on_date.setDate(QDate.fromString(self.info.once_date, self.info.qt_date_format))
self.from_date.setDisplayFormat(self.info.qt_date_format)
self.from_date.setDate(QDate.fromString(self.info.from_date, self.info.qt_date_format))
self.from_check.setChecked(self.info.check)
self.to_date.setDisplayFormat(self.info.qt_date_format)
self.to_date.setDate(QDate.fromString(self.info.to_date, self.info.qt_date_format))
self.date_combo.setCurrentIndex(self.info.active)
self.every_combo.setCurrentIndex(self.info.every_active)
self.every_spin.setValue(self.info.every_spin)
self.on_date_combo_currentIndexChanged()
def translate(self):
self.setWindowTitle(_("Edit Date"))
self.date_label.setText(_("Date"))
self.on_label.setText(_("On"))
self.every_label.setText(_("Every"))
self.every_label2.setText(_("Every"))
self.from_label.setText(_("From"))
self.to_label.setText(_("To"))
self.error_label.setText(_("From must be before To"))
date_types = [
_("Once"),
_("Every Day"),
_("Every X Days"),
_("Every Xth of the Month"),
_("Every <day>"),
_("Every Other Day"),
_("Next X Days")
]
self.date_combo.clear()
self.date_combo.addItems(date_types)
dates = [
_("Other"),
_("Today"),
_("Tomorrow"),
_("Monday"),
_("Tuesday"),
_("Wednesday"),
_("Thursday"),
_("Friday"),
_("Saturday"),
_("Sunday"),
_("Christmas")
]
self.on_combo.clear()
self.on_combo.addItems(dates)
weekdays = [
_("Monday"),
_("Tuesday"),
_("Wednesday"),
_("Thursday"),
_("Friday"),
_("Saturday"),
_("Sunday"),
_("Weekday"),
_("Weekend")
]
self.every_combo.clear()
self.every_combo.addItems(weekdays)
self.cancel_button.setText(_("Cancel"))
self.ok_button.setText(_("Ok"))
@Slot()
def on_from_date_dateChanged(self):
self.validate_from_to()
@Slot()
def on_to_date_dateChanged(self):
self.validate_from_to()
@Slot()
def on_cancel_button_pressed(self):
self.reject()
@Slot()
def on_ok_button_pressed(self):
index = self.date_combo.currentIndex()
once_index = self.on_combo.currentIndex()
once_date = self.on_date.date().toString(self.info.qt_date_format)
every_index = self.every_combo.currentIndex()
every_spin = self.every_spin.value()
from_date = ""
to_date = ""
if self.from_check.isChecked():
from_date = self.from_date.date().toString(self.info.qt_date_format)
to_date = self.to_date.date().toString(self.info.qt_date_format)
if self.validate_from_to():
self.update.emit(self.info.build_date(index, once_index, once_date, every_index, every_spin, from_date, to_date))
self.accept()
@Slot()
def on_on_combo_currentIndexChanged(self):
index = self.on_combo.currentIndex()
if index == 0:
self.on_date.show()
else:
self.on_date.hide()
@Slot()
def on_date_combo_currentIndexChanged(self):
index = self.date_combo.currentIndex()
self.on_label.hide()
self.on_combo.hide()
self.on_date.hide()
self.every_label.hide()
self.every_combo.hide()
self.every_label2.hide()
self.every_spin.hide()
self.days_label.hide()
self.from_label.hide()
self.from_date.hide()
self.from_check.hide()
self.to_label.hide()
self.to_date.hide()
self.error_label.hide()
if index == self.info.once:
self.on_label.show()
self.on_combo.show()
self.on_on_combo_currentIndexChanged()
elif index == self.info.every_days:
self.every_label2.show()
self.every_label2.setText(_("Every"))
self.every_spin.show()
self.every_spin.setMaximum(600)
self.days_label.show()
self.days_label.setText(_("Day(s)"))
elif index == self.info.every_month:
self.every_label2.show()
self.every_label2.setText(_("Every"))
self.every_spin.show()
self.every_spin.setMaximum(12)
self.days_label.show()
self.days_label.setText(_("of the month"))
elif index == self.info.every:
self.every_label.show()
self.every_combo.show()
elif index == self.info.next_days:
self.every_label2.show()
self.every_label2.setText(_("Next"))
self.every_spin.show()
self.every_spin.setMaximum(600)
self.days_label.show()
            self.days_label.setText(_("Day(s)"))
if not (index == self.info.once):
self.from_label.show()
self.from_date.show()
self.from_check.show()
if not (index == self.info.next_days):
self.to_label.show()
self.to_date.show()
self.validate_from_to()
self.resize(1, 1)
self.adjustSize()
def validate_from_to(self):
        if self.date_combo.currentIndex() != self.info.once and self.date_combo.currentIndex() != self.info.next_days and self.from_check.isChecked():
            from_s = self.from_date.date().toString(self.info.qt_date_format)
            to_s = self.to_date.date().toString(self.info.qt_date_format)
            valid = self.info.validate_from_to(from_s, to_s)
            if not valid:
                self.error_label.show()
            else:
                self.error_label.hide()
            return valid
return True
|
bhdouglass/remindor-qt
|
remindor_qt/DateDialog.py
|
Python
|
gpl-3.0
| 8,672
|
[
"Brian"
] |
f7a1272f2a1891c6516a5fbe7bb0a90339dd77a228d9a2cf4329a309944ca788
|
# -*- coding: utf-8 -*-
"""
Calculates rho(z) to maintain hydrostatic equilibrium in a thin disc.
Assumes uniform temperature in the disc, and an infinite disc where
rho can be treated (at least locally) as only a function of z.
Created on Mon Jan 20 12:30:06 2014
@author: ibackus
"""
# ICgen packages
import isaac
# External packages
import numpy as np
import scipy
import scipy.integrate as nInt
import scipy.optimize as opt
from scipy.interpolate import interp1d
import pynbody
from pynbody.array import SimArray
from warnings import warn
def rho_z(ICobj, r):
"""
rho,z = rho_z(...)
Calculates rho(z) to maintain hydrostatic equilibrium in a thin disc.
Assumes uniform temperature in the disc, and an infinite disc where
rho can be treated (locally) as only a function of z.
Only calculates for z>=0, since the density is assumed to be symmetric
about z=0
The initial guess for rho (a gaussian) only really seems to work for
Mstar >> Mdisc. Otherwise the solution can diverge violently.
* NUMERICAL CALCULATION OF RHO(Z) *
The calculation proceeds using several steps.
1) Make an initial guess for I, the integral of rho from z to inf. This
is an error function
2) Modify length scale of the initial guess to minimize the residual
for the differential equation governing I. Use this as the new
initial guess.
3) Find the root I(z) for the differential equation governing I, with
the boundary condition that I(0) = sigma/2
4) Set rho = -dI/dz
5) Find the root rho(z) for the diff. eq. governing rho.
6) In order to satisfy the BC on I, scale rho so that:
Integral(rho) = I(0)
7) Repeat (5) and (6) until rho is rescaled by a factor closer to unity
than rho_tol
Steps 5-7 are done because the solution for I does not seem to
satisfy the diff. eq. for rho very well. But doing it this way
allows rho to satisfy the surface density profile
* Arguments *
ICobj - the initial conditions object for which rho is being calculated.
r - The radius at which rho is being calculated. Should have units
* Output *
Returns a 1D SimArray (see pynbody) of rho(z) and a 1D SimArray of z,
with the same units as ICobj.settings.rho_calc.zmax
"""
# Load from ICobj
settings = ICobj.settings
T = ICobj.T(r)
sigma = ICobj.sigma(r)
# Parse settings
rho_tol = settings.rho_calc.rho_tol
nz = settings.rho_calc.nz
zmax = settings.rho_calc.zmax
m = settings.physical.m
M = settings.physical.M
# Physical constants
kB = SimArray(1.0,'k')
G = SimArray(1.0,'G')
# Set up default units
mass_unit = M.units
length_unit = zmax.units
r = (r.in_units(length_unit)).copy()
# Initial conditions/physical parameters
rho_int = 0.5*sigma.in_units(mass_unit/length_unit**2) # Integral of rho from 0 to inf
a = (G*M*m/(kB*T)).in_units(length_unit)
b = (2*np.pi*G*m/(kB*T)).in_units(length_unit/mass_unit)
z0guess = np.sqrt(2*r*r*r/a).in_units(length_unit)# Est. scale height of disk
z0_dummy = (2/(b*sigma)).in_units(length_unit)
z = np.linspace(0.0,zmax,nz)
dz = z[[1]]-z[[0]]
# Echo parameters used
print '***********************************************'
print '* Calculating rho(z)'
print '***********************************************'
print 'sigma = {0} {1}'.format(sigma,sigma.units)
print 'zmax = {0} {1}'.format(zmax,zmax.units)
print 'r = {0} {1}'.format(r,r.units)
print 'molecular mass = {0} {1}'.format(m,m.units)
print 'Star mass = {0} {1}'.format(M,M.units)
print 'Temperature = {0} {1}'.format(T,T.units)
print ''
print 'rho_tol = {0}'.format(rho_tol)
print 'nz = {0}'.format(nz)
print '***********************************************'
print 'a = {0} {1}'.format(a,a.units)
print 'b = {0} {1}'.format(b,b.units)
print 'z0guess = {0} {1}'.format(z0guess,z0guess.units)
print '***********************************************'
print 'z0 (from sech^2) = {0} {1}'.format(z0_dummy,z0_dummy.units)
# --------------------------------------------------------
# STRIP THE UNITS FROM EVERYTHING!!!
# This has to be done because many of the scipy/numpy functions used cannot
# handle pynbody units. Before returning z, rho, or anything else, the
# Units must be re-introduced
# --------------------------------------------------------
rho_int, a, b, z0guess, z0_dummy, z, dz, r, T, sigma \
= isaac.strip_units([rho_int, a, b, z0guess, z0_dummy, z, dz, r, T, sigma])
# --------------------------------------------------------
# Check sigma and T
# --------------------------------------------------------
if sigma < 1e-100:
warn('Sigma too small. setting rho = 0')
rho0 = np.zeros(len(z))
# Set up units
rho0 = isaac.set_units(rho0, mass_unit/length_unit**3)
z = isaac.set_units(z, length_unit)
return rho0, z
if T > 1e100:
warn('Temperature too large. Setting rho = 0')
rho0 = np.zeros(len(z))
# Set up units
rho0 = isaac.set_units(rho0, mass_unit/length_unit**3)
z = isaac.set_units(z, length_unit)
return rho0, z
# -------------------------------------------------------------------
# FUNCTION DEFINITIONS
# -------------------------------------------------------------------
def dI_dz(I_in):
"""
Finite difference approximation of dI/dz, assuming I is odd around I(0)
"""
I = I_in.copy()
dI = np.zeros(len(I))
# Fourth order center differencing
dI[0] = (-I[2] + 8*I[1] - 7*I[0])/(6*dz)
dI[1] = (-I[3] + 8*I[2] - 6*I[0] - I[1])/(12*dz)
dI[2:-2] = (-I[4:] + 8*I[3:-1] -8*I[1:-3] + I[0:-4])/(12*dz)
# Second order backward differencing for right edge
dI[-2:] = (3*I[-2:] -4*I[-3:-1] + I[-4:-2])/(2*dz)
return dI
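    # Note: the oddness assumption I(-z) - I(0) = -(I(z) - I(0)) supplies the
    # ghost values I(-n*dz) = 2*I[0] - I[n] that are folded into the one-sided
    # stencils for dI[0] and dI[1] above.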
def d2I_dz2(I_in):
# Finite difference for d2I/dz2 assuming it is 0 at the origin
I = I_in.copy()
d2I = np.zeros(len(I))
# Boundary condition
d2I[0] = 0
# Centered 4th order finite difference
d2I[1] = (-I[3] + 16*I[2] - 30*I[1] + 16*I[0] -(2*I[0] - I[1]))/(12*dz**2)
d2I[2:-2] = (-I[4:] + 16*I[3:-1] - 30*I[2:-2] + 16*I[1:-3] - I[0:-4])/(12*(dz**2))
# second order backward difference for right edge
d2I[-2:] = (-2*I[-2:] + 5*I[-3:-1] -4*I[-4:-2] + I[-5:-3])/dz**2
return d2I
def Ires(I_in):
"""
Calculate the residual for the differential equation governing I,
the integral of rho from z to "infinity."
"""
# DEFINE INITIAL CONDITION:
I = I_in.copy()
I[0] = rho_int
#I[-1] = 0.0
weight = 1.0
res = d2I_dz2(I) + dI_dz(I)*(a*z/((z**2 + r**2)**(1.5)) + 2*b*(I[0] - I))
return weight*res
def drho_dz(rho_in):
"""
Fourth order, centered finite difference for d(rho)/dz, assumes that
rho is an even function. The right-hand boundary is done using
backward differencing
"""
rho = rho_in.copy()
drho = np.zeros(len(rho))
drho[0] = 0.0 # defined by boundary condition, rho[0] = max(rho)
drho[1] = (-rho[3] + 8*rho[2] - 8*rho[0] + rho[1])/(12*dz)
drho[2:-2] = (-rho[4:] + 8*rho[3:-1] - 8*rho[1:-3] + rho[0:-4])/(12*dz)
drho[-2:] = (3*rho[-2:] - 4*rho[-3:-1] + rho[-4:-2])/(2*dz)
return drho
def residual(rho_in):
"""
Estimate d(rho)/dz
"""
rho = rho_in.copy()
# Estimate integral of rho
I = np.zeros(len(rho))
I[1:] = nInt.cumtrapz(rho,z)
# Estimate residual
res = drho_dz(rho) + a*rho*z/((z**2 + r**2)**(1.5)) + 2*b*rho*I
return res
def erf_res(scale_size):
testfct = rho_int*(1 - scipy.special.erf(z/scale_size))
return abs(Ires(testfct)).sum()
pass
# -------------------------------------------------------------------
# FIND RHO
# -------------------------------------------------------------------
# Estimate the scale length of the error function
z0 = opt.fminbound(erf_res,z0guess/100.0,5.0*z0guess)
print 'Length scale guess: {0} {1}'.format(z0guess, length_unit)
print 'Final length scale: {0} {1}'.format(z0, length_unit)
# Begin by finding I, the integral of rho (from z to inf)
# Assuming rho is gaussian, I is an error function
guess = rho_int*(1 - scipy.special.erf(z/z0))
# Find the root of the differential equation for I
Isol = opt.newton_krylov(Ires,guess,iter=50)
# rho is the negative derivative
rho0 = -dI_dz(Isol)
rhoguess = rho0.copy()
# Now apply the diff eq on rho
maxiter = 50
for n in range(maxiter):
print 'Iteration {0}'.format(n+1)
rho0 = opt.newton_krylov(residual,rho0,iter=50)
rho_scale = rho_int/nInt.cumtrapz(rho0,z)[-1]
print 'Scaling rho by {0}'.format(rho_scale)
rho0 = rho0*rho_scale
if abs(1-rho_scale) < rho_tol - 1:
break
# Re-introduce units
rho0 = isaac.set_units(rho0, mass_unit/length_unit**3)
z = isaac.set_units(z, length_unit)
return SimArray(rho0,'Msol au**-3'), SimArray(z,'au')
def cdfinv_z(z,rho):
"""
Calculates the inverse of the cumulative distribution function for
probability as a function of z for a given r
*** Arguments ***
* z * z positions to calculate over. 1D array
* rho * Density as a function of z. Treated as an un-normalized
probability. 1D array
IF Z doesn't have units, units of 'au' are assumed
*** Returns ***
Returns the inverse normalized CDF as 1D spline interpolation
"""
# Check for units
if pynbody.units.has_units(z):
zunit = z.units
else:
zunit = pynbody.units.au
# Calculate the CDF from prob
nz = len(z)
f = np.zeros(nz)
f[1:] = nInt.cumtrapz(rho,z)
if f.max() <= 0.0:
# The density (rho) is zero here for all z or neg or something.
# Make all particles go to z = 0.0
def finv(m_in):
return m_in*0.0
return finv
f /= f.max()
# Calculate the inverse CDF.
# Assume CDF is approximately monotonic and sort to force it to be
ind = f.argsort()
f = f[ind]
z = z[ind]
# Drop values where CDF is constant (ie, prob = 0)
mask = np.ones(nz,dtype='bool')
for n in range(1,nz):
if f[n] == f[n-1]:
mask[n] = False
f = f[mask]
z = z[mask]
finv_spline = interp1d(f,z,kind='linear')
def finv(m):
return SimArray(finv_spline(m), zunit)
return finv
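# Usage sketch (hypothetical workflow): draw z positions for particles at a
# given radius r by inverse-transform sampling,
#
#     rho, z = rho_z(ICobj, r)              # density profile at radius r
#     finv = cdfinv_z(z, rho)               # inverse CDF, maps [0, 1] -> z
#     z_samples = finv(np.random.rand(10000))
#
# mirroring a random half of the samples to negative z afterwards if a disc
# symmetric about z = 0 is wanted, since rho was only computed for z >= 0.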
|
dflemin3/ICgen
|
backup03/calc_rho.py
|
Python
|
mit
| 11,325
|
[
"Gaussian"
] |
25ad8a39401d7912f5fecaadbcd7a3d12350f88aaff7283ee8a8dd2e953158a8
|
##############################################################################
# adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD)
# Simulations on HPC Resources
# Copyright 2017 FU Berlin and the Authors
#
# Authors: Jan-Hendrik Prinz
# Contributors:
#
# `adaptiveMD` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import threading
import time
import numpy as np
import os
import types
from file import URLGenerator, File
from engine import Trajectory
from bundle import StoredBundle
from condition import Condition
from resource import Resource
from generator import TaskGenerator
from model import Model
from task import Task
from worker import Worker
from logentry import LogEntry
from plan import ExecutionPlan
from mongodb import MongoDBStorage, ObjectStore, FileStore, DataDict, WeakValueCache
import logging
logger = logging.getLogger(__name__)
class Project(object):
"""
A simulation project
    Notes
    -----
You will later create `Scheduler` objects that explicitly correspond to
    a specific queue on a specific cluster that is accessible from within this
shared FS resource.
Attributes
----------
name : str
a short descriptive name for the project. This name will be used in the
database creation also.
resource : `Resource`
a resource to run the project on. The resource specifies the memory
storage location. Not necessarily which cluster is used. An example is,
if at an institute several clusters (CPU, GPU) share the same shared FS.
If clusters use the same FS you can run simulations across clusters
        without problems, and so this resource is the top-level
        limitation.
files : :class:`Bundle`
a set of file objects that are available in the project and are
believed to be available within the resource as long as the project
lives
trajectories : `ViewBundle`
all `File` object that are of `Trajectory` type and which have a
positive `created` attribute. This means the file was really created
and has not been altered yet.
workers : `Bundle`
a set of all registered `Worker` instanced in the project
models : `Bundle`
a set of stored models in the DB
tasks : `Bundle`
a set of all queued `Task`s in the project
logs : `Bundle`
a set of all stored log entries
data : `Bundle`
a set of `DataDict` objects that represent completely stored files in
the database of arbitrary size
schedulers : set of `Scheduler`
a set of attached schedulers with controlled shutdown and reference
storage : `MongoDBStorage`
the mongodb storage wrapper to access the database of the project
_worker_dead_time : int
the time after which an unresponsive worker is considered dead. Its
tasks will be assigned the state set in
:attr:`_set_task_state_from_dead_workers`.
Default is 60s. Make sure that
        the heartbeat of a worker is much less than this.
_set_task_state_from_dead_workers : str
if a worker is dead then its tasks are assigned this state. Default is
``created`` which means the task will be restarted by another worker.
        You can also choose ``halt`` or ``cancelled``. See `Task` for details
See also
--------
`Task`
"""
def __init__(self, name):
self.name = name
self.session = None
self.pilot_manager = None
self.schedulers = set()
self.models = StoredBundle()
self.generators = StoredBundle()
self.files = StoredBundle()
self.tasks = StoredBundle()
self.workers = StoredBundle()
self.logs = StoredBundle()
self.data = StoredBundle()
# self.commands = StoredBundle()
self.resource = None
self._all_trajectories = self.files.c(Trajectory)
self.trajectories = self._all_trajectories.v(lambda x: x.created > 0)
self._events = []
# generator for trajectory names
self.traj_name = URLGenerator(
os.path.join(
'sandbox:///projects/',
self.name,
'trajs',
'{count:08d}',
''))
self.storage = None
self._client = None
self._open_db()
self._lock = threading.Lock()
self._event_timer = None
self._stop_event = None
# timeout if a worker is not changing its heartbeat in the last n seconds
self._worker_dead_time = 60
# tasks from dead workers that were started or queue should do what?
self._set_task_state_from_dead_workers = 'created'
# instead mark these as failed and decide manually
# self._set_task_state_from_dead_workers = 'fail'
# or do not care. This is fast but not recommended
# self._set_task_state_from_dead_workers = None
def initialize(self, resource):
"""
Initialize a project with a specific resource.
Notes
-----
This should only be called to setup the project and only the very
first time.
Parameters
----------
resource : `Resource`
the resource used in this project
"""
self.storage.close()
self.resource = resource
st = MongoDBStorage(self.name, 'w')
# st.create_store(ObjectStore('objs', None))
st.create_store(ObjectStore('generators', TaskGenerator))
st.create_store(ObjectStore('files', File))
st.create_store(ObjectStore('resources', Resource))
st.create_store(ObjectStore('models', Model))
st.create_store(ObjectStore('tasks', Task))
st.create_store(ObjectStore('workers', Worker))
st.create_store(ObjectStore('logs', LogEntry))
st.create_store(FileStore('data', DataDict))
# st.create_store(ObjectStore('commands', Command))
st.save(self.resource)
st.close()
self._open_db()
def _open_db(self):
# open DB and load status
self.storage = MongoDBStorage(self.name)
if hasattr(self.storage, 'tasks'):
self.files.set_store(self.storage.files)
self.generators.set_store(self.storage.generators)
self.models.set_store(self.storage.models)
self.tasks.set_store(self.storage.tasks)
self.workers.set_store(self.storage.workers)
self.logs.set_store(self.storage.logs)
self.data.set_store(self.storage.data)
# self.commands.set_store(self.storage.commands)
self.resource = self.storage.resources.find_one({})
self.storage.files.set_caching(True)
self.storage.models.set_caching(WeakValueCache())
self.storage.generators.set_caching(True)
self.storage.tasks.set_caching(True)
self.storage.workers.set_caching(True)
self.storage.resources.set_caching(True)
self.storage.data.set_caching(WeakValueCache())
self.storage.logs.set_caching(WeakValueCache())
# make sure that the file number will be new
self.traj_name.initialize_from_files(self.trajectories)
def reconnect(self):
"""
Reconnect the DB
"""
self._open_db()
def _close_db(self):
self.storage.close()
def close_rp(self):
"""
Close the RP session
Before using RP you need to re-open and then you will run in a
new session.
"""
self._close_rp()
def _close_rp(self):
for r in set(self.schedulers):
r.shut_down(False)
# self.report.header('finalize')
if self.session is not None and not self.session.closed:
self.session.close()
self.files.close()
self.generators.close()
self.models.close()
@classmethod
def list(cls):
"""
List all projects in the DB
Returns
-------
list of str
a list of all project names
"""
storages = MongoDBStorage.list_storages()
return storages
@classmethod
def delete(cls, name):
"""
Delete a complete project
Notes
-----
Attention!!!! This cannot be undone!!!!
Parameters
----------
name : str
the project name to be deleted
"""
MongoDBStorage.delete_storage(name)
def get_scheduler(self, name=None, **kwargs):
"""
Parameters
----------
name : str
name of the scheduler class provided by the `Resource` used in
this project. If None (default) the cluster/queue ``default`` is
used, which needs to be implemented for every resource
kwargs : ``**kwargs``
Additional arguments to initialize the cluster scheduler provided
by the `Resource`
Notes
-----
the scheduler is automatically entered/opened, so the pilot job is
submitted to the queueing system and counts against your simulation
time! If you do not want this to happen immediately, create the
`Scheduler` yourself and later call ``scheduler.enter(project)``
to start using it. To close the scheduler call ``scheduler.exit()``
Returns
-------
`Scheduler`
the scheduler object that can be used to execute tasks on that
cluster/queue
"""
# get a new scheduler to submit tasks
if name is None:
scheduler = self.resource.default()
else:
scheduler = getattr(self.resource, name)(**kwargs)
# and prepare the scheduler
scheduler.enter(self)
# add the task generating capabilities to the scheduler
map(scheduler.has, self.generators)
scheduler.stage_generators()
return scheduler
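# Illustrative usage sketch (hypothetical; any keyword arguments depend on the
# Resource in use):
#
#     scheduler = project.get_scheduler()   # enters/opens and submits the pilot job
#     # ... use the scheduler, then release it when done:
#     # scheduler.exit()
#
# To avoid submitting the pilot job right away, construct the Scheduler
# yourself and call ``scheduler.enter(project)`` later, as noted above.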
def close(self):
"""
Close the project and all related sessions and DB connections
"""
self._close_rp()
self._close_db()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
fail = True
if exc_type is None:
pass
elif issubclass(exc_type, (KeyboardInterrupt, SystemExit)):
# self.report.warn('exit requested\n')
pass
elif issubclass(exc_type, Exception):
# self.report.error('caught exception: %s\n' % exc_type)
fail = False
self.close()
return fail
def queue(self, *tasks):
"""
Submit jobs to the worker queue
Parameters
----------
tasks : (list of) `Task` or `Trajectory`
anything that can be run, such as a `Task` or a `Trajectory` with an engine
"""
for task in tasks:
if isinstance(task, Task):
self.tasks.add(task)
elif isinstance(task, (list, tuple)):
map(self.queue, task)
elif isinstance(task, Trajectory):
if task.engine is not None:
t = task.run()
if t is not None:
self.tasks.add(t)
# else:
# # if the engines can handle some object we parse these into tasks
# for cls, gen in self.file_generators.items():
# if isinstance(task, cls):
# return self.queue(gen(task))
# we do not allow iterators, too dangerous
# elif hasattr(task, '__iter__'):
# map(self.tasks.add, task)
# @property
# def file_generators(self):
# """
# Return a list of file generators the convert certain objects into task
#
# Returns
# -------
# dict object : function -> (list of) `Task`
# """
# d = {}
# for gen in self.generators:
# d.update(gen.file_generators())
#
# return d
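# Illustrative sketch for ``queue`` (hypothetical; ``engine`` and ``pdb_file``
# are assumed to exist in the session):
#
#     traj = project.new_trajectory(pdb_file, 100, engine)
#     project.queue(traj)              # a Trajectory with an engine becomes a Task
#     project.queue(task_a, [task_b])  # Task objects and (nested) lists also work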
def new_trajectory(self, frame, length, engine=None, number=1):
"""
Convenience function to create a new `Trajectory` object
It will use incrementing numbers to create trajectory names used in
the engine executions. Use this function to always get an unused
trajectory name.
Parameters
----------
frame : `File` or `Frame`
if given a `File` it is assumed to be a ``.pdb`` file that contains
the initial coordinates. If a `Frame` is given, it is assumed to be the
initial structure / frame zero of this trajectory
length : int
the length of the trajectory
engine : `Engine` or None
the engine used to generate the trajectory. The engine contains all
the specifics about the trajectory's internal structure, since it is the
engine's responsibility to actually create the trajectory.
number : int
the number of trajectory objects to be returned. If ``1`` it will be
a single object. Otherwise a list of `Trajectory` objects.
Returns
-------
`Trajectory` or list of `Trajectory`
"""
if number == 1:
traj = Trajectory(next(self.traj_name), frame, length, engine)
return traj
elif number > 1:
return [self.new_trajectory(frame, length, engine) for _ in range(number)]
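# Illustrative sketch (hypothetical names): asking for several trajectories at
# once returns a list, each entry getting a fresh, unused ``sandbox://`` name
# from ``traj_name``:
#
#     trajs = project.new_trajectory(pdb_file, 100, engine, number=4)
#     # len(trajs) == 4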
def on_ntraj(self, numbers):
"""
Return a condition that is true as soon as the project has n trajectories
Parameters
----------
numbers : int or iterator of int
either a single int or an iterator that returns several ints
Returns
-------
`NTrajectories` or generator of `NTrajectories`
the single condition or a generator of conditions matching the ints
in the iterator
"""
if hasattr(numbers, '__iter__'):
return (NTrajectories(self, n) for n in numbers)
else:
return NTrajectories(self, numbers)
def on_nmodel(self, numbers):
"""
Return a condition that becomes true once a certain number of models is reached
Parameters
----------
numbers : int or iterator of int
the number(s) of the models to be reached
Returns
-------
(generator of) `Condition`
a (list of) `Condition`
"""
if hasattr(numbers, '__iter__'):
return (NModels(self, n) for n in numbers)
else:
return NModels(self, numbers)
# todo: move to brain
def find_ml_next_frame(self, n_pick=10):
"""
Find initial frames picked by inverse equilibrium distribution
This is the simplest adaptive strategy possible. States that have been
seen less often are more likely to be picked as starting points,
effectively assuming that less knowledge of a state implies a higher
likelihood of finding a new state.
Parameters
----------
n_pick : int
number of initial frames to pick
Returns
-------
list of `Frame`
the list of selected initial frames.
"""
if len(self.models) > 0:
model = self.models.last
assert(isinstance(model, Model))
data = model.data
n_states = data['clustering']['k']
modeller = data['input']['modeller']
outtype = modeller.outtype
# the stride of the analyzed trajectories
used_stride = modeller.engine.types[outtype].stride
# all stride for full trajectories
full_strides = modeller.engine.full_strides
frame_state_list = {n: [] for n in range(n_states)}
for nn, dt in enumerate(data['clustering']['dtrajs']):
for mm, state in enumerate(dt):
# if there is a full traj with existing frame, use it
if any([(mm * used_stride) % stride == 0 for stride in full_strides]):
frame_state_list[state].append((nn, mm * used_stride))
c = data['msm']['C']
q = 1.0 / np.sum(c, axis=1)
# remove states that do not have at least one frame
for k in range(n_states):
if len(frame_state_list[k]) == 0:
q[k] = 0.0
# and normalize the remaining ones
q /= np.sum(q)
state_picks = np.random.choice(np.arange(len(q)), size=n_pick, p=q)
filelist = data['input']['trajectories']
picks = [
frame_state_list[state][np.random.randint(0, len(frame_state_list[state]))]
for state in state_picks
]
return [filelist[pick[0]][pick[1]] for pick in picks]
elif len(self.trajectories) > 0:
# otherwise pick random
return [
self.trajectories.pick().pick() for _ in range(n_pick)]
else:
return []
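# Sketch of the weighting used above: with C the count matrix from the model,
# each state i gets weight q_i = 1 / sum_j C_ij, states without any stored
# frame are set to q_i = 0, and q is renormalized before ``n_pick`` states are
# drawn; one stored frame is then picked at random from each drawn state.
# For example, row sums of (10, 40, 50) give picking probabilities of about
# (0.69, 0.17, 0.14).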
def new_ml_trajectory(self, engine, length, number):
"""
Create new trajectories with initial points picked by inverse eq dist
Parameters
----------
engine : `Engine`
the engine to be used
length : int
length of the trajectories returned
number : int
number of trajectories returned
Returns
-------
list of `Trajectory`
the list of `Trajectory` objects with initial frames chosen using
:meth:`find_ml_next_frame`
See Also
--------
:meth:`find_ml_next_frame`
"""
return [self.new_trajectory(frame, length, engine) for frame in
self.find_ml_next_frame(number)]
def events_done(self):
"""
Check if all events are done
Returns
-------
bool
True if all events are done
"""
return len(self._events) == 0
def add_event(self, event):
"""
Attach an event to the project
These events will not be stored and only run in the current python
session. They are the parts responsible for creating tasks once
certain conditions are met.
Parameters
----------
event : `Event` or generator
the event to be added or a generator function that is then
converted to an `ExecutionPlan`
Returns
-------
`Event`
the actual event used
"""
if isinstance(event, (tuple, list)):
return map(self._events.append, event)
if isinstance(event, types.GeneratorType):
event = ExecutionPlan(event)
self._events.append(event)
logger.info('Events added. Remaining %d' % len(self._events))
self.trigger()
return event
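# Illustrative sketch (hypothetical generator): yielding conditions from a
# generator turns it into an `ExecutionPlan` that resumes once they hold:
#
#     def strategy():
#         yield project.on_ntraj(10)
#         project.queue(project.new_ml_trajectory(engine, 100, 4))
#
#     project.add_event(strategy())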
def trigger(self):
"""
Trigger a check of state changes that leads to task execution
This needs to be called regularly to advance the simulation. If not,
certain checks for state change will not be called and no new tasks
will be generated.
"""
with self._lock:
found_iteration = 50 # max iterations for safety
while found_iteration > 0:
found_new_events = False
for event in list(self._events):
if event:
new_events = event.trigger(self)
if new_events:
found_new_events = True
if not event:
# event is finished, clean up
idx = self._events.index(event)
# todo: wait for completion
del self._events[idx]
logger.info('Event finished! Remaining %d' % len(self._events))
if found_new_events:
# if new events or tasks we should re-trigger
found_iteration -= 1
else:
found_iteration = 0
# check worker status and mark as dead if not responding for long times
now = time.time()
for w in self.workers:
if w.state not in ['dead', 'down'] and now - w.seen > self._worker_dead_time:
# make sure it will end and not finish any jobs, just in case
w.command = 'kill'
# and mark it dead
w.state = 'dead'
# search for abandoned tasks and do something with them
if self._set_task_state_from_dead_workers:
for t in self.tasks:
if t.worker == w and t.state in ['queued', 'running']:
t.state = self._set_task_state_from_dead_workers
w.current = None
def run(self):
"""
Start observing events in the project
This is still somewhat experimental and starts a background thread that
calls :meth:`Project.trigger` at regular intervals. Make sure to call
:meth:`Project.stop`
before you quit the notebook session or exit. Otherwise a background
thread might be left running (not confirmed but possible!)
"""
if not self._event_timer:
self._stop_event = threading.Event()
self._event_timer = self.EventTriggerTimer(self._stop_event, self)
self._event_timer.start()
def stop(self):
"""
Stop observing events
"""
if self._event_timer:
self._stop_event.set()
self._event_timer = None
self._stop_event = None
def wait_until(self, condition):
"""
Block until the given condition evaluates to true
Parameters
----------
condition : callable
function that is called at regular intervals. Once it evaluates to
True this method returns
"""
while not condition():
self.trigger()
time.sleep(5.0)
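# Typical blocking pattern (sketch):
#
#     project.add_event(strategy())            # as in ``add_event`` above
#     project.wait_until(project.events_done)
#
# Alternatively, ``project.run()`` triggers events from a background thread;
# remember to call ``project.stop()`` before exiting.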
class EventTriggerTimer(threading.Thread):
"""
A special thread to call the project trigger mechanism
"""
def __init__(self, event, project):
super(Project.EventTriggerTimer, self).__init__()
self.stopped = event
self.project = project
def run(self):
while not self.stopped.wait(5.0):
self.project.trigger()
class NTrajectories(Condition):
"""
Condition that triggers once the project has at least n trajectories present
"""
def __init__(self, project, number):
super(NTrajectories, self).__init__()
self.project = project
self.number = number
def check(self):
return len(self.project.trajectories) >= self.number
def __str__(self):
return '#files[%d] >= %d' % (len(self.project.trajectories), self.number)
def __add__(self, other):
if isinstance(other, int):
return NTrajectories(self.project, self.number + other)
return NotImplemented
class NModels(Condition):
"""
Condition that triggers once the project has at least n models present
"""
def __init__(self, project, number):
super(NModels, self).__init__()
self.project = project
self.number = number
def check(self):
return len(self.project.models) >= self.number
def __str__(self):
return '#models[%d] >= %d' % (len(self.project.models), self.number)
def __add__(self, other):
if isinstance(other, int):
return NModels(self.project, self.number + other)
return NotImplemented
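# Both condition classes support adding an int, which is convenient for
# building ladders of conditions (sketch):
#
#     cond = NTrajectories(project, 10)
#     later = cond + 10                            # same as NTrajectories(project, 20)
#     conds = project.on_ntraj(range(10, 51, 10))  # generator of conditions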
|
thempel/adaptivemd
|
adaptivemd/project.py
|
Python
|
lgpl-2.1
| 24,986
|
[
"MDTraj"
] |
7457a93b5751980f63d212073a4c8d541f2dfca154c1315b1c580023a9feafb2
|
from __future__ import with_statement
# ==============================================================================
# GGisy (python v2.7)
#
# Author: Sandro Valenzuela (sandrolvalenzuead@gmail.com)
# Bugs and errors: https://github.com/Sanrrone/GGisy/issues
#
# Please type "python GGisy.py -h" for usage help
#
# ==============================================================================
__author__ = 'Sandro Valenzuela (sandrolvalenzuead@gmail.com)'
__version__ = '1.0'
import sys, os, subprocess, glob, csv, collections
from optparse import OptionParser
from operator import itemgetter
from Bio import SeqIO
def main():
parser = OptionParser(usage = "Usage: python GGisy.py -r genome1.fna -q genome2.fna")
parser.add_option("-r","--reference",dest="genome1",help="First genome to be used as reference", default=None)
parser.add_option("-q","--query",dest="genome2",help="Second genome to be used as query against the first genome (-r)", default=None)
parser.add_option("-l","--alignmentLength",dest="alignL",help="Aligment length cutoff in blast output",default=1000)
parser.add_option("-o","--outprefix",dest="outfile",help="output prefix for output files",default="synteny")
parser.add_option("-c","--coverage",dest="coverage",help="query coverage to be considered",default=50)
parser.add_option("-e","--evalue",dest="evalue",help="E-value cutoff for blastn search [default: 1e-3]",default=1e-3)
parser.add_option("-i","--identity",dest="Identity",help="Identity cutoff on the blastn alignment to consider the region",default=50)
parser.add_option("-t","--threads",dest="Threads",help="Number of threads to be used for blast [default: 4]",default=4)
parser.add_option("-b","--blastout",dest="Blastout",help="Blast output file to be used instead doing it [default: none]",default=None)
parser.add_option("-k","--keepfiles",dest="clean",help="clean files after execution [default: True]",default=True, action='store_false')
(options,args) = parser.parse_args()
genome1 = str(options.genome1)
genome2 = str(options.genome2)
alignL= int(options.alignL)
evalue= str(options.evalue)
Identity= int(options.Identity)
threads= str(options.Threads) #for the subprocess call this must be str()
blastout= options.Blastout #don't cast to str
cleanf=options.clean
coverage=int(options.coverage)
outfile= options.outfile
#check variables
if not genome1 or genome1 is None:
print("* No genome was provided (-g1), use -h for help")
sys.exit()
else:
if os.path.isfile(genome1) == False:
print("*",genome1," doesn't exist")
sys.exit()
if not genome2 or genome2 is None:
print("* its mandatory provide 2 genomes (-g2), use -h for help")
sys.exit()
else:
if os.path.isfile(genome2) == False:
print("* ",genome2," doesn't exist")
sys.exit()
if blastout != None:
if os.path.isfile(blastout) == False:
print("* ", blastout, "not found, check if file exist or let the program do the blast omiting this option (-b)")
sys.exit()
blastBIN=which("blastn")
if blastBIN == None:
print("No blastn was found, install it before continue (make sure is in your $PATH)")
sys.exit()
makeblastBIN=which("makeblastdb")
if makeblastBIN == None:
print("No makeblastdb was found, install it from blast+ (make sure is in your $PATH)")
sys.exit()
rscriptBIN=which("Rscript")
if rscriptBIN == None:
print("No Rscript was found, make sure is in your $PATH")
sys.exit()
Inputs = collections.namedtuple('Inputs', ['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7', 'v8', 'v9', 'v10'])
I = Inputs(genome1, genome2, alignL, evalue, Identity, threads, blastout, cleanf, coverage, outfile)
return I
def which(program): #function to check if some program exists
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def blasting(genome1, genome2, evalue, threads):
#build the blast database from the reference and run blastn with the query
subprocess.call(["makeblastdb", "-in", genome1, "-input_type", "fasta", "-dbtype", "nucl", "-out", "ref"])
subprocess.call(["blastn", "-query", genome2, "-db", "ref",
"-evalue", evalue, "-outfmt", "6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore qlen", "-strand", "both",
"-num_threads", threads, "-out", "tmp.tsv"])
return str("tmp.tsv")
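# The two subprocess calls above correspond to the following shell commands
# (sketch; file names are placeholders):
#
#   makeblastdb -in genome1.fna -input_type fasta -dbtype nucl -out ref
#   blastn -query genome2.fna -db ref -evalue 1e-3 -strand both -num_threads 4 \
#          -outfmt "6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore qlen" \
#          -out tmp.tsv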
def filterBlastOutput(blastout,alignL,evalue,identity,coverage, outfile):
PARSED=open(outfile+".tsv",'w') #overwrite if exist
with open(blastout) as tsvfile:
tsvreader = csv.reader(tsvfile, delimiter="\t")
for line in tsvreader:
toint = int(line[3])
cov = (toint/float(line[12]))*100
if toint >= alignL and cov >= coverage:
toint = float(line[2])
if toint >= float(identity):
PARSED.write("\t".join(map(str, line[0:3]+line[6:10]))+"\n")
PARSED.close()
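# Column mapping for the filter above (custom outfmt 6): line[3] is the
# alignment length, line[12] the query length and line[2] the percent
# identity, so a hit is kept when
#   length >= alignL  and  100 * length / qlen >= coverage  and  pident >= identity
# and only qseqid, sseqid, pident, qstart, qend, sstart and send are written out.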
def parsingGenomes(genome):
gname = genome.split('/')[-1]
PARSED=open(str(gname+"_info.tsv"),'w') #overwrite if exist
fasta_sequences = SeqIO.parse(open(genome),'fasta')
for fasta in fasta_sequences:
name, sequence = fasta.id, str(fasta.seq)
lengthSeq= len(sequence)
PARSED.write("%s\t1\t%s\n" % (name, lengthSeq))
PARSED.close()
return str(gname+"_info.tsv")
def handleR(conn, reference, query, alignL, outfile):
plotstep=open("handle.R", 'w')
plotstep.write("""rm(list=ls());
library(OmicCircos)
library(RColorBrewer)
library(varhandle)
args<-commandArgs()
handlefile<-as.character(args[6])
refname<-as.character(args[7])
queryname<-as.character(args[8])
filterl<-as.numeric(args[9])
outfile<-as.character(args[10])
handle<-read.table(handlefile,sep = "\\t",stringsAsFactors = F,check.names = F)
ref<-read.table(refname,sep = "\\t",stringsAsFactors = F,check.names = F)
query<-read.table(queryname,sep = "\\t", stringsAsFactors = F,check.names = F)
rownames(ref)<-ref$V1
rownames(query)<-query$V1
qryUniq<-unique(sort(handle$V1))
refUniq<-unique(sort(handle$V2))
ref<-ref[refUniq,]
ref<-ref[with(ref, order(-V3, V1)), ]
query<-query[qryUniq,]
query<-query[with(query, order(+V3, V1)), ]
data<-rbind(ref,query)
refname<-unlist(strsplit(refname,"_info.tsv"))[1]
queryname<-unlist(strsplit(queryname,"_info.tsv"))[1]
lowId<-min(handle$V3)
fhand<-handle[handle$V6<handle$V7,]
rhand<-handle[handle$V6>handle$V7,]
linkf<-data.frame(seg1=fhand$V1, start1=fhand$V4, end1=fhand$V5, seg2=fhand$V2, start2=fhand$V6, end2=fhand$V7, stringsAsFactors = F)
linkr<-data.frame(seg1=rhand$V1, start1=rhand$V4, end1=rhand$V5, seg2=rhand$V2, start2=rhand$V6, end2=rhand$V7, stringsAsFactors = F)
#fix reverse positions
for(i in 1:nrow(linkr)){
contign<-linkr[i,4]
contigl<-ref[contign,3]
linkr[i,5]<- contigl-linkr[i,5]+1
linkr[i,6]<- contigl-linkr[i,6]+1
}
data["V5"]<-data["V4"]<-1
colnames(data)<- c("chr", "start", "end","V4","V5")
tocir <- segAnglePo(data, seg=data$chr)
gl<-sum(data$end)+nrow(data)
maxangr<-270+(350/gl)*sum(ref$V3)
spacer<-maxangr/(maxangr-270)/nrow(ref)
for(i in 1:nrow(ref)){
#350 is the total angle (available) for all segments
tocir[i,"angle.end"]<-as.character(as.numeric(tocir[i,"angle.start"]) + (350/gl)*as.numeric(tocir[i,7]))
tocir[i+1,"angle.start"]<-as.character(as.numeric(tocir[i,"angle.end"])+spacer)
}
tocir[i+1,"angle.start"]<-as.character(as.numeric(tocir[i+1,"angle.start"])+2.5)
tocir[i+1,"angle.end"]<-as.character(as.numeric(tocir[i+1,"angle.start"]) + (350/gl)*as.numeric(tocir[i+1,7]))
maxangq<-628-maxangr
spacer<-628/maxangq/nrow(query)
if(nrow(ref)+2>=nrow(tocir)){
i<-nrow(tocir)
tocir[i,"angle.start"]<-as.character(as.numeric(tocir[i-1,"angle.end"])+spacer)
tocir[i,"angle.end"]<-as.character(628)
}else{
for(i in (nrow(ref)+2):nrow(tocir)-1){
#350 is the total angle (available) for all segments
tocir[i,"angle.end"]<-as.character(as.numeric(tocir[i,"angle.start"]) + (350/gl)*as.numeric(tocir[i,7]))
tocir[i+1,"angle.start"]<-as.character(as.numeric(tocir[i,"angle.end"])+spacer)
}
}
refang<-as.numeric(tocir[1:nrow(ref),2])
qryang<-as.numeric(tocir[(nrow(ref)+1):(nrow(ref)+nrow(query)),2])
maxangr<-max(refang)
maxangq<-max(qryang)
faketocir <- tocir
faketocir[,1]<-""
maxangr<-max(refang)
for(i in 1:nrow(tocir)){
if(270+(maxangr-270)/2<as.numeric(tocir[i,2])){
break
}
}
faketocir[i,1]<-refname
maxangq<-max(qryang)
for(i in 1:nrow(tocir)){
if(maxangr+(maxangq-maxangr)/2<as.numeric(tocir[i,2])){
break
}
}
faketocir[i,1]<-queryname
colors<-rev(colorRampPalette(rev(brewer.pal(n = 7, name = "RdYlBu")))(20))
delta<-(100-lowId)/20
scaleColors<- function(x){
cArray<-c()
for(id in x){
for(i in 1:20){
if(id>=100-(delta*i)){
break
}
}
cArray<-c(cArray,colors[i])
}
return(cArray)
}
addalpha <- function(col, alpha=1){
if(missing(col))
stop("Please provide a vector of colours.")
apply(sapply(col, col2rgb)/255, 2,
function(x)
rgb(x[1], x[2], x[3], alpha=alpha))
}
black<-addalpha("#000000",0.7)
colors<-addalpha(colors,1)
try({
linkf[,"colors"]<-addalpha(scaleColors(fhand$V3),1)
},silent = T)
try({
linkr[,"colors"]<-addalpha(scaleColors(rhand$V3),1)
},silent = T)
pdf(file=paste0(outfile,".pdf"), width = 10, height =10)
if(nrow(data)<=20){
par(mar=c(2,2,2,2))
xorigin=700
yorigin=1000
plot(c(0,2000), c(0,2000), type="n", axes=FALSE, xlab="", ylab="", main="")
circos(R=450, cir=tocir, W=10,type="chr", print.chr.lab=T, scale=F,xc = xorigin,yc = yorigin,
col = c(rep("dark blue",nrow(ref)),rep("#FEE496",nrow(query))),cex = 5)
if(nrow(linkf)>0){
circos(R=440, cir=tocir, mapping=linkf , type="link.pg", lwd=0.5, col=linkf$colors,xc = xorigin,yc = yorigin)
}
if(nrow(linkr)>0){
circos(R=440, cir=tocir, mapping=linkr , type="link.pg", lwd=0.5, col=linkr$colors,xc = xorigin,yc = yorigin)
newlinkr<-linkr
newlinkr$start1<-newlinkr$start1+as.integer((newlinkr$end1-newlinkr$start1)/2)+1
newlinkr$start2<-newlinkr$start2+as.integer((newlinkr$end2-newlinkr$start2)/2)-1
circos(R=440, cir=tocir, W=10, mapping=newlinkr , type="link", lwd=0.6, col=black,xc = xorigin,yc = yorigin)
}
legend(x = 1500, y=1700, legend = c(refname,queryname),
ncol = 1, cex = 0.8, bty="n",
fill=c("dark blue","#FEE496"),
border = c("dark blue","#FEE496"),text.width=c(0.5,0.5),
title="Sequences")
legend(x = 1430, y=1500, legend = c(paste("Reference: ", nrow(ref), " (", sum(ref$V3), " bp)", sep = ""), paste("Query: ",nrow(query), " (", sum(query$V3), " bp)", sep="")),
ncol = 1, cex = 0.8, bty="n",
fill=c("dark blue","#FEE496"),
border = c("dark blue","#FEE496"),text.width=c(0.5,0.5),
title=paste("Contigs align >= ", filterl, " bp", sep=""))
legend(x = 1520, y=1300, legend = c("Forward","Reverse"),lty = c(0,1),merge=T,seg.len = 0.6,
ncol = 1, cex = 0.8, bty="n",
fill="white",
border = "black",text.width=c(0.5,0.5),
title="Strand Match\n(on reference)")
legend(x = 1505, y=1100, legend = c("100","","","","","","","","","",(100-lowId)/2 + lowId,"","","","","","","","",lowId),
ncol = 1, cex = 0.8, bty="n",
fill=colors,
border = colors,
y.intersp = 0.5,
x.intersp = 0.5,text.width=c(0.5,0.5),
title="Identity percent\n")
}else{
par(mar=c(2,2,2,2))
xorigin=750
yorigin=550
plot(c(0,1500), c(0,1500), type="n", axes=FALSE, xlab="", ylab="", main="")
circos(R=450, cir=faketocir, W=10,type="chr", print.chr.lab=T, scale=F,xc = xorigin,yc = yorigin,
col = "white")
circos(R=410, cir=tocir, W=10,type="chr", print.chr.lab=F, scale=F,xc = xorigin,yc = yorigin,
col = c(rep("dark blue",nrow(ref)),rep("#FEE496",nrow(query))),cex = 5)
if(nrow(linkf)>0){
highlightr <- c(420, 450, tocir[1,1], 1, tocir[nrow(ref),1], tocir[nrow(ref),7], "dark blue", NA)
circos(cir=tocir, mapping=highlightr, type="hl",xc = xorigin,yc = yorigin)
circos(R=400, cir=tocir, mapping=linkf , type="link.pg", lwd=0.5, col=linkf$colors,xc = xorigin,yc = yorigin)
}
if(nrow(linkr)>0){
highlightq <- c(420, 450, query[1,1], 1, query[nrow(query),1], query[nrow(query),3], "#FEE496", NA)
circos(cir=tocir, mapping=highlightq, type="hl",xc = xorigin,yc = yorigin)
circos(R=400, cir=tocir, mapping=linkr , type="link.pg", lwd=0.5, col=linkr$colors,xc = xorigin,yc = yorigin)
newlinkr<-linkr
newlinkr$start1<-newlinkr$start1+as.integer((newlinkr$end1-newlinkr$start1)/2)+1
newlinkr$start2<-newlinkr$start2+as.integer((newlinkr$end2-newlinkr$start2)/2)-1
circos(R=400, cir=tocir, W=10, mapping=newlinkr , type="link", lwd=0.3, col=black,xc = xorigin,yc = yorigin)
}
legend(x = 210, y=1500, legend = c(paste("Reference: ", nrow(ref), " (", sum(ref$V3), " bp)", sep = ""), paste("Query: ",nrow(query), " (", sum(query$V3), " bp)", sep="")),
ncol = 1, cex = 0.8, bty="n",
fill=c("dark blue","#FEE496"),
border = c("dark blue","#FEE496"),text.width=c(0.5,0.5),
title=paste("Contigs align >= ", filterl, " bp", sep=""))
legend(x = 270, y=1300, legend = c("Forward","Reverse"),lty = c(0,1),merge=T,seg.len = 0.6,
ncol = 1, cex = 0.8, bty="n",
fill="white",
border = "black",text.width=c(0.5,0.5),
title="Strand Match\\n(on reference)")
legend(x = 990, y=1500, legend = c("100","","","","","","","","","",(100-lowId)/2 + lowId,"","","","","","","","",lowId),
ncol = 1, cex = 0.8, bty="n",
fill=colors,
border = colors,
y.intersp = 0.5,
x.intersp = 0.5,text.width=c(0.5,0.5),
title="Identity percent\\n")
}
dev.off()""")
plotstep.close()
subprocess.call(["Rscript", "handle.R", conn, reference, query, str(alignL), outfile, "--vanilla"])
def cleanfiles(ginfo1, ginfo2):
if os.path.isfile("tmp.tsv"):
os.remove("tmp.tsv")
if os.path.isfile("ref.nin"):
os.remove("ref.nin")
if os.path.isfile("ref.nsq"):
os.remove("ref.nsq")
if os.path.isfile("ref.nhr"):
os.remove("ref.nhr")
if os.path.isfile("handle.R"):
os.remove("handle.R")
if os.path.isfile(ginfo1):
os.remove(ginfo1)
if os.path.isfile(ginfo2):
os.remove(ginfo2)
if __name__ == '__main__':
mainV=main()
blastout=mainV.v7
if blastout is None:
blastout=blasting(genome1=mainV.v1, genome2=mainV.v2, evalue=mainV.v4, threads=mainV.v6)
filterBlastOutput(blastout=blastout, alignL=mainV.v3, evalue=mainV.v4, identity=mainV.v5, coverage=mainV.v9, outfile=mainV.v10)
ref=parsingGenomes(genome=mainV.v1)
que=parsingGenomes(genome=mainV.v2)
handleR(conn=mainV.v10+".tsv",reference=ref, query=que, alignL=mainV.v3, outfile=mainV.v10)
if mainV.v8:
cleanfiles(ref,que)
sys.exit()
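# Example invocation (sketch; file names are placeholders):
#
#   python GGisy.py -r reference.fna -q query.fna -l 2000 -i 70 -t 8 -o mysynteny
#
# This writes mysynteny.tsv (the filtered blast hits) and mysynteny.pdf
# (the circular synteny plot rendered by the generated handle.R script).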
|
Sanrrone/GGisy
|
GGisy.py
|
Python
|
apache-2.0
| 14,845
|
[
"BLAST"
] |
8abc26b836a94471da7c9efe104d88ea6ff2050d6863c60feb6c4bcaf872b924
|
#!/usr/bin/env python
"""VisualizeTestTimes.py
Visualize CTest test times with VTK.
Run from the top of the build tree after the ctest has been run at least once.
Pass the --modular-grouping flag to group by module."""
from __future__ import print_function
import os
import pprint
import subprocess
import sys
import vtk
vtk_major_version = vtk.vtkVersion.GetVTKMajorVersion()
if len(sys.argv) > 1 and sys.argv[1] == '-h':
print('Usage: VisualizeTestTimes.py [--modular-grouping]')
sys.exit(1)
modular = False
if len(sys.argv) > 1 and sys.argv[1] == '--modular-grouping':
modular = True
# Sanity check to ensure we are in the build tree
test_cost_data_file = os.path.join('Testing', 'Temporary', 'CTestCostData.txt')
if not os.path.exists(test_cost_data_file):
print('Run this script from the build tree after running ctest ' +
'at least once.')
sys.exit(1)
# Read the input data
with open(test_cost_data_file, 'r') as fp:
test_cost_data_lines = fp.readlines()
failed_tests_index = test_cost_data_lines.index('---\n')
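# CTestCostData.txt appears to contain one '<test name> <number of runs> <cost>'
# entry per line, followed by a '---' separator and then the names of failed
# tests; only the entries above the separator are imported below.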
# Import the data into a vtkTable
table = vtk.vtkTable()
id_array = vtk.vtkUnsignedIntArray()
id_array.SetName('Pedigree Id')
table.AddColumn(id_array)
attributes = table.GetAttributes(vtk.vtkDataObject.ROW)
attributes.SetActivePedigreeIds('Pedigree Id')
test_name_array = vtk.vtkStringArray()
test_name_array.SetName('Test Name')
table.AddColumn(test_name_array)
number_of_runs_array = vtk.vtkUnsignedIntArray()
number_of_runs_array.SetName('Number of Runs')
table.AddColumn(number_of_runs_array)
test_time_array = vtk.vtkFloatArray()
test_time_array.SetName('Test Time')
table.AddColumn(test_time_array)
runs_long_array = vtk.vtkStringArray()
runs_long_array.SetName('RUNS_LONG Label')
table.AddColumn(runs_long_array)
other_labels_array = vtk.vtkStringArray()
other_labels_array.SetName('Other Labels')
table.AddColumn(other_labels_array)
table.SetNumberOfRows(failed_tests_index)
ctest_exe = 'ctest'
runs_long = subprocess.check_output([ctest_exe, '-L', 'RUNS_LONG', '-N'])
runs_long = runs_long.split('\n')[1:-3]
runs_long = [ii.split()[2] for ii in runs_long]
has_runs_long_time = 0.0
no_runs_long_time = 0.0
for ii in range(failed_tests_index):
split = test_cost_data_lines[ii].strip().split()
table.SetValue(ii, 0, ii)
name = split[0]
table.SetValue(ii, 1, name)
table.SetValue(ii, 2, int(split[1]))
time = float(split[2])
table.SetValue(ii, 3, time)
if name in runs_long:
table.SetValue(ii, 4, 'Has RUNS_LONG Label')
has_runs_long_time += time
else:
table.SetValue(ii, 4, 'No RUNS_LONG Label')
no_runs_long_time += time
table.SetValue(ii, 5, 'None')
labels = subprocess.check_output([ctest_exe, '--print-labels'])
labels = labels.split('\n')[2:-1]
labels = [ii.strip() for ii in labels]
if 'RUNS_LONG' in labels:
labels.pop(labels.index('RUNS_LONG'))
# Assuming tests only have RUNS_LONG and at most one other label
if modular:
for label in labels:
tests = subprocess.check_output([ctest_exe, '-L', label, '-N'])
tests = tests.split('\n')[2:-3]
tests = [ii.split()[2] for ii in tests]
for test in tests:
index = test_name_array.LookupValue(test)
other_labels_array.SetValue(index, label)
print('RUNS_LONG tests:')
pprint.pprint(runs_long)
print('RUNS_LONG time percentage: {0:.4}%'.format(has_runs_long_time /
(has_runs_long_time + no_runs_long_time) * 100))
print('RUNS_LONG test percentage: {0:.4}%'.format(len(runs_long) /
(failed_tests_index - 1.0) * 100))
# Convert the vtkTable to a vtkTree
table_to_tree = vtk.vtkTableToTreeFilter()
if vtk_major_version == 5:
table_to_tree.SetInput(table)
else:
table_to_tree.SetInputData(table)
group_runs_long = vtk.vtkGroupLeafVertices()
group_runs_long.SetInputConnection(table_to_tree.GetOutputPort())
group_runs_long.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.VERTEX,
'RUNS_LONG Label')
group_runs_long.SetInputArrayToProcess(1, 0, 0, vtk.vtkDataObject.VERTEX,
'Test Name')
group_other_label = vtk.vtkGroupLeafVertices()
group_other_label.SetInputConnection(group_runs_long.GetOutputPort())
group_other_label.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.VERTEX,
'Other Labels')
group_other_label.SetInputArrayToProcess(1, 0, 0, vtk.vtkDataObject.VERTEX,
'Test Name')
# Visualize with a tree map view
tree_map_view = vtk.vtkTreeMapView()
if modular:
tree_map_view.SetTreeFromInputConnection(group_other_label.GetOutputPort())
else:
tree_map_view.SetTreeFromInputConnection(group_runs_long.GetOutputPort())
tree_map_view.SetAreaLabelArrayName('Test Name')
tree_map_view.SetAreaHoverArrayName('Test Name')
tree_map_view.SetAreaLabelVisibility(True)
tree_map_view.SetAreaSizeArrayName('Test Time')
tree_map_view.DisplayHoverTextOn()
tree_map_view.SetLayoutStrategyToSquarify()
# Pretty preference: Mellow, Neon, Ocean
theme = vtk.vtkViewTheme.CreateNeonTheme()
tree_map_view.ApplyViewTheme(theme)
tree_map_view.Update()
tree_map_view.ResetCamera()
interactor = tree_map_view.GetInteractor()
interactor.Initialize()
interactor.Start()
|
RayRuizhiLiao/ITK_4D
|
Utilities/Maintenance/VisualizeTestTimes.py
|
Python
|
apache-2.0
| 5,456
|
[
"VTK"
] |
8904403f507256aa83b54d3e12a11ec194d6058aafa2894682fa26fcdb11d6f6
|
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import datetime
import imp
import json
import os
import shlex
import zipfile
import re
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.powershell import module_manifest as ps_manifest
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.plugins.loader import module_utils_loader
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ANSIBALLZ_TEMPLATE = u'''%(shebang)s
%(coding)s
_ANSIBALLZ_WRAPPER = True # For test-module script to tell this is a ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _ansiballz_main():
import os
import os.path
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read (realpath calls abspath)
pass
if scriptdir is not None:
sys.path = [p for p in sys.path if p != scriptdir]
import base64
import imp
import shutil
import tempfile
import zipfile
if sys.version_info < (3,):
bytes = str
MOD_DESC = ('.py', 'U', imp.PY_SOURCE)
PY3 = False
else:
unicode = str
MOD_DESC = ('.py', 'r', imp.PY_SOURCE)
PY3 = True
ZIPDATA = """%(zipdata)s"""
# Note: temp_path isn't needed once we switch to zipimport
def invoke_module(modlib_path, temp_path, json_params):
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(modlib_path, mode='a')
# py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
sitecustomize = sitecustomize.encode('utf-8')
# Use a ZipInfo to work around zipfile limitation on hosts with
# clocks set to a pre-1980 year (for instance, Raspberry Pi)
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
z.writestr(zinfo, sitecustomize)
# Note: Remove the following section when we switch to zipimport
# Write the module to disk for imp.load_module
module = os.path.join(temp_path, '__main__.py')
with open(module, 'wb') as f:
f.write(z.read('__main__.py'))
f.close()
# End pre-zipimport section
z.close()
# Put the zipped up module_utils we got from the controller first in the python path so that we
# can monkeypatch the right basic
sys.path.insert(0, modlib_path)
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
%(coverage)s
# Run the module! By importing it as '__main__', it thinks it is executing as a script
with open(module, 'rb') as mod:
imp.load_module('__main__', mod, module, MOD_DESC)
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
script_path = os.path.join(basedir, '__main__.py')
if command == 'excommunicate':
print('The excommunicate debug command is deprecated and will be removed in 2.11. Use execute instead.')
command = 'execute'
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# Set pythonpath to the debug dir
sys.path.insert(0, basedir)
# read in the args file which the user may have modified
with open(args_path, 'rb') as f:
json_params = f.read()
# Monkeypatch the parameters into basic
from ansible.module_utils import basic
basic._ANSIBLE_ARGS = json_params
# Run the module! By importing it as '__main__', it thinks it is executing as a script
import imp
with open(script_path, 'r') as f:
importer = imp.load_module('__main__', f, script_path, ('.py', 'r', imp.PY_SOURCE))
# Ansible modules must exit themselves
print('{"msg": "New-style module did not handle its own exit", "failed": true}')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
#
# See comments in the debug() method for information on debugging
#
ANSIBALLZ_PARAMS = %(params)s
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
# Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
# (this helps ansible-test produce coverage stats)
temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_')
zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip')
with open(zipped_mod, 'wb') as modlib:
modlib.write(base64.b64decode(ZIPDATA))
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
# Note: temp_path isn't needed once we switch to zipimport
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except (NameError, OSError):
# tempdir creation probably failed
pass
sys.exit(exitcode)
if __name__ == '__main__':
_ansiballz_main()
'''
ANSIBALLZ_COVERAGE_TEMPLATE = '''
# Access to the working directory is required by coverage.
# Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
try:
os.getcwd()
except OSError:
os.chdir('/')
os.environ['COVERAGE_FILE'] = '%(coverage_output)s'
import atexit
import coverage
cov = coverage.Coverage(config_file='%(coverage_config)s')
def atexit_coverage():
cov.stop()
cov.save()
atexit.register(atexit_coverage)
cov.start()
'''
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
if C.DEFAULT_KEEP_REMOTE_FILES:
# Keep comments when KEEP_REMOTE_FILES is set. That way users will see
# the comments with some nice usage instructions
ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
else:
# ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
class ModuleDepFinder(ast.NodeVisitor):
# Caveats:
# This code currently does not handle:
# * relative imports from py2.6+ from . import urls
IMPORT_PREFIX_SIZE = len('ansible.module_utils.')
def __init__(self, *args, **kwargs):
"""
Walk the ast tree for the python module.
Save submodule[.submoduleN][.identifier] into self.submodules
self.submodules will end up with tuples like:
- ('basic',)
- ('urls', 'fetch_url')
- ('database', 'postgres')
- ('database', 'postgres', 'quote')
It's up to calling code to determine whether the final element of the
dotted strings are module names or something else (function, class, or
variable names)
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
self.submodules = set()
def visit_Import(self, node):
# import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
for alias in (a for a in node.names if a.name.startswith('ansible.module_utils.')):
py_mod = alias.name[self.IMPORT_PREFIX_SIZE:]
py_mod = tuple(py_mod.split('.'))
self.submodules.add(py_mod)
self.generic_visit(node)
def visit_ImportFrom(self, node):
# Special case: six needs extra handling because of its
# import logic
if node.names[0].name == '_six':
self.submodules.add(('_six',))
elif node.module.startswith('ansible.module_utils'):
where_from = node.module[self.IMPORT_PREFIX_SIZE:]
if where_from:
# from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
py_mod = tuple(where_from.split('.'))
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
else:
# from ansible.module_utils import MODLIB [,MODLIB2] [as asname]
for alias in node.names:
self.submodules.add((alias.name,))
self.generic_visit(node)
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
fd = open(path, 'rb')
data = fd.read()
fd.close()
return data
def _get_shebang(interpreter, task_vars, templar, args=tuple()):
"""
Note not stellar API:
Returns None instead of always returning a shebang line. Doing it this
way allows the caller to decide to use the shebang it read from the
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter).strip()
if interpreter_config not in task_vars:
return (None, interpreter)
interpreter = templar.template(task_vars[interpreter_config].strip())
shebang = u'#!' + interpreter
if args:
shebang = shebang + u' ' + u' '.join(args)
return (shebang, interpreter)
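# Sketch of the lookup above (hypothetical values): with
#   task_vars = {'ansible_python_interpreter': '/usr/bin/python3'}
# _get_shebang(u'/usr/bin/python', task_vars, templar) returns
# (u'#!/usr/bin/python3', u'/usr/bin/python3'); without that variable it
# returns (None, u'/usr/bin/python') so the caller keeps the original shebang.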
def recursive_finder(name, data, py_module_names, py_module_cache, zf):
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
the module and its module_utils files need.
"""
# Parse the module and find the imports of ansible.module_utils
tree = ast.parse(data)
finder = ModuleDepFinder()
finder.visit(tree)
#
# Determine which of the imports we've found are modules (vs class, function,
# or variable names) of packages
#
normalized_modules = set()
# Loop through the imports that we've found to normalize them
# Exclude paths that match with paths we've already processed
# (Have to exclude them a second time once the paths are processed)
module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
module_utils_paths.append(_MODULE_UTILS_PATH)
for py_module_name in finder.submodules.difference(py_module_names):
module_info = None
if py_module_name[0] == 'six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('six', module_utils_paths)
py_module_name = ('six',)
idx = 0
elif py_module_name[0] == '_six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('_six', [os.path.join(p, 'six') for p in module_utils_paths])
py_module_name = ('six', '_six')
idx = 0
else:
# Check whether either the last or the second to last identifier is
# a module name
for idx in (1, 2):
if len(py_module_name) < idx:
break
try:
module_info = imp.find_module(py_module_name[-idx],
[os.path.join(p, *py_module_name[:-idx]) for p in module_utils_paths])
break
except ImportError:
continue
# Could not find the module. Construct a helpful error message.
if module_info is None:
msg = ['Could not find imported module support code for %s. Looked for' % (name,)]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
# Found a byte compiled file rather than source. We cannot send byte
# compiled over the wire as the python version might be different.
# imp.find_module seems to prefer to return source packages so we just
# error out if imp.find_module returns byte compiled files (This is
# fragile as it depends on undocumented imp.find_module behaviour)
if module_info[2][2] not in (imp.PY_SOURCE, imp.PKG_DIRECTORY):
msg = ['Could not find python source for imported module support code for %s. Looked for' % name]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# If not already processed then we've got work to do
# If not in the cache, then read the file into the cache
# We already have a file handle for the module open so it makes
# sense to read it now
if py_module_name not in py_module_cache:
if module_info[2][2] == imp.PKG_DIRECTORY:
# Read the __init__.py instead of the module file as this is
# a python package
normalized_name = py_module_name + ('__init__',)
if normalized_name not in py_module_names:
normalized_path = os.path.join(os.path.join(module_info[1], '__init__.py'))
normalized_data = _slurp(normalized_path)
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
else:
normalized_name = py_module_name
if normalized_name not in py_module_names:
normalized_path = module_info[1]
normalized_data = module_info[0].read()
module_info[0].close()
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
# Make sure that all the packages that this module is a part of
# are also added
for i in range(1, len(py_module_name)):
py_pkg_name = py_module_name[:-i] + ('__init__',)
if py_pkg_name not in py_module_names:
pkg_dir_info = imp.find_module(py_pkg_name[-1],
[os.path.join(p, *py_pkg_name[:-1]) for p in module_utils_paths])
normalized_modules.add(py_pkg_name)
py_module_cache[py_pkg_name] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
# FIXME: Currently the AnsiBallZ wrapper monkeypatches module args into a global
# variable in basic.py. If a module doesn't import basic.py, then the AnsiBallZ wrapper will
# traceback when it tries to monkeypatch. So, for now, we have to unconditionally include
# basic.py.
#
# In the future we need to change the wrapper to monkeypatch the args into a global variable in
# their own, separate python module. That way we won't require basic.py. Modules which don't
# want basic.py can import that instead. AnsibleModule will need to change to import the vars
# from the separate python module and mirror the args into its global variable for backwards
# compatibility.
if ('basic',) not in py_module_names:
pkg_dir_info = imp.find_module('basic', module_utils_paths)
normalized_modules.add(('basic',))
py_module_cache[('basic',)] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
# End of AnsiballZ hack
#
# iterate through all of the ansible.module_utils* imports that we haven't
# already checked for new imports
#
# set of modules that we haven't added to the zipfile
unprocessed_py_module_names = normalized_modules.difference(py_module_names)
for py_module_name in unprocessed_py_module_names:
py_module_path = os.path.join(*py_module_name)
py_module_file_name = '%s.py' % py_module_path
zf.writestr(os.path.join("ansible/module_utils",
py_module_file_name), py_module_cache[py_module_name][0])
display.vvvvv("Using module_utils file %s" % py_module_cache[py_module_name][1])
# Add the names of the files we're scheduling to examine in the loop to
# py_module_names so that we don't re-examine them in the next pass
# through recursive_finder()
py_module_names.update(unprocessed_py_module_names)
for py_module_file in unprocessed_py_module_names:
recursive_finder(py_module_file, py_module_cache[py_module_file][0], py_module_names, py_module_cache, zf)
# Save memory; the file won't have to be read again for this ansible module.
del py_module_cache[py_module_file]
def _is_binary(b_module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
become_method, become_user, become_password, become_flags, environment):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
# us what we have to look to substitute in the module files and whether
# we're using module replacer or ansiballz to format the module itself.
if _is_binary(b_module_data):
module_substyle = module_style = 'binary'
elif REPLACER in b_module_data:
# Do REPLACER before from ansible.module_utils because we need to make sure
# we substitute REPLACER with "from ansible.module_utils.basic import *"
module_style = 'new'
module_substyle = 'python'
b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
elif b'from ansible.module_utils.' in b_module_data:
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in b_module_data:
module_style = 'new'
module_substyle = 'powershell'
b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE):
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in b_module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in b_module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
return b_module_data, module_style, shebang
output = BytesIO()
py_module_names = set()
if module_substyle == 'python':
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
python_repred_params = repr(json.dumps(params))
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
zipdata = open(cached_module_filename, 'rb').read()
else:
if module_name in action_write_locks.action_write_locks:
display.debug('ANSIBALLZ: Using lock for %s' % module_name)
lock = action_write_locks.action_write_locks[module_name]
else:
# If the action plugin directly invokes the module (instead of
# going through a strategy) then we don't have a cross-process
# Lock specifically for this module. Use the "unexpected
# module" lock instead
display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
lock = action_write_locks.action_write_locks[None]
display.debug('ANSIBALLZ: Acquiring lock')
with lock:
display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
# Note: If we need to import from release.py first,
# remember to catch all exceptions: https://github.com/ansible/ansible/issues/16523
zf.writestr('ansible/__init__.py',
b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n__version__="' +
to_bytes(__version__) + b'"\n__author__="' +
to_bytes(__author__) + b'"\n')
zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')
zf.writestr('__main__.py', b_module_data)
py_module_cache = {('__init__',): (b'', '[builtin]')}
recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf)
zf.close()
zipdata = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
if not os.path.exists(lookup_path):
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.makedirs(lookup_path)
display.debug('ANSIBALLZ: Writing module')
with open(cached_module_filename + '-part', 'wb') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
display.debug('ANSIBALLZ: Renaming module')
os.rename(cached_module_filename + '-part', cached_module_filename)
display.debug('ANSIBALLZ: Done creating module')
if zipdata is None:
display.debug('ANSIBALLZ: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
zipdata = open(cached_module_filename, 'rb').read()
except IOError:
raise AnsibleError('A different worker process failed to create module file. '
'Look at traceback for that process for debugging information.')
zipdata = to_text(zipdata, errors='surrogate_or_strict')
shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar)
if shebang is None:
shebang = u'#!/usr/bin/python'
# Enclose the parts of the interpreter in quotes because we're
# substituting it into the template as a Python string
interpreter_parts = interpreter.split(u' ')
interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))
coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
if coverage_config:
# Enable code coverage analysis of the module.
# This feature is for internal testing and may change without notice.
coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
coverage_config=coverage_config,
coverage_output=os.environ['_ANSIBLE_COVERAGE_OUTPUT']
)
else:
coverage = ''
now = datetime.datetime.utcnow()
output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
params=python_repred_params,
shebang=shebang,
interpreter=interpreter,
coding=ENCODING_STRING,
year=now.year,
month=now.month,
day=now.day,
hour=now.hour,
minute=now.minute,
second=now.second,
coverage=coverage,
)))
b_module_data = output.getvalue()
elif module_substyle == 'powershell':
# Powershell/winrm don't actually make use of shebang so we can
# safely set this here. If we let the fallback code handle this
# it can fail in the presence of the UTF8 BOM commonly added by
# Windows text editors
shebang = u'#!powershell'
# create the common exec wrapper payload and set that as the module_data
# bytes
b_module_data = ps_manifest._create_powershell_wrapper(
b_module_data, module_args, environment, async_timeout, become,
become_method, become_user, become_password, become_flags,
module_substyle
)
elif module_substyle == 'jsonargs':
module_args_json = to_bytes(json.dumps(module_args))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
# python modules (which has been replaced with something else in
# ansiballz) If we remove them from jsonargs-style module replacer
# then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
# The main event -- substitute the JSON args string into the module
b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
return (b_module_data, module_style, shebang)
def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
become_method=None, become_user=None, become_password=None, become_flags=None, environment=None):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
properties not available here.
"""
task_vars = {} if task_vars is None else task_vars
environment = {} if environment is None else environment
with open(module_path, 'rb') as f:
# read in the module source
b_module_data = f.read()
(b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
async_timeout=async_timeout, become=become, become_method=become_method,
become_user=become_user, become_password=become_password, become_flags=become_flags,
environment=environment)
if module_style == 'binary':
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
elif shebang is None:
b_lines = b_module_data.split(b"\n", 1)
if b_lines[0].startswith(b"#!"):
b_shebang = b_lines[0].strip()
# shlex.split on python-2.6 needs bytes. On python-3.x it needs text
args = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))
# _get_shebang() takes text strings
args = [to_text(a, errors='surrogate_or_strict') for a in args]
interpreter = args[0]
b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:])[0],
errors='surrogate_or_strict', nonstring='passthru')
if b_new_shebang:
b_lines[0] = b_shebang = b_new_shebang
if os.path.basename(interpreter).startswith(u'python'):
b_lines.insert(1, b_ENCODING_STRING)
shebang = to_text(b_shebang, nonstring='passthru', errors='surrogate_or_strict')
else:
# No shebang, assume a binary module?
pass
b_module_data = b"\n".join(b_lines)
return (b_module_data, module_style, shebang)
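# --- Illustrative usage sketch (an assumption, not part of the original file) ---
# A minimal way to exercise modify_module() directly: write a throwaway
# "old-style" module to a temp file and inspect the returned style/shebang.
# The module name and contents below are placeholders.
if __name__ == '__main__':
    import tempfile
    from ansible.parsing.dataloader import DataLoader
    from ansible.template import Templar
    with tempfile.NamedTemporaryFile('wb', suffix='.py', delete=False) as tmp:
        tmp.write(b'#!/usr/bin/python\nprint("hello")\n')
    demo_templar = Templar(loader=DataLoader())
    data, style, shebang = modify_module('demo', tmp.name, {}, demo_templar, task_vars={})
    print(style, shebang)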
|
tchernomax/ansible
|
lib/ansible/executor/module_common.py
|
Python
|
gpl-3.0
| 40,953
|
[
"VisIt"
] |
09fc5889c0d48eb9e1e91309d60c90abcdfd610abbaefc01efcbe4a4309d3dc0
|
import random
import unittest
from hearthbreaker.agents.basic_agents import PredictableAgent, DoNothingAgent
from tests.agents.testing_agents import OneCardPlayingAgent, EnemySpellTestingAgent, \
CardTestingAgent, EnemyMinionSpellTestingAgent, PlayAndAttackAgent, HeroPowerAndCardPlayingAgent
from hearthbreaker.constants import MINION_TYPE
from tests.testing_utils import generate_game_for
from hearthbreaker.cards import *
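# Note (added, inferred from the usage below rather than stated in the original
# file): generate_game_for(p1_cards, p2_cards, p1_agent, p2_agent) builds a
# deterministic game whose decks are filled with the given card(s) and whose
# players are driven by the given agent classes; the fixed random.seed in
# setUp keeps every test reproducible.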
class TestWarlock(unittest.TestCase):
def setUp(self):
random.seed(1857)
def test_MortalCoil(self):
game = generate_game_for(BloodfenRaptor, MortalCoil, DoNothingAgent, OneCardPlayingAgent)
raptor = BloodfenRaptor()
raptor.summon(game.players[0], game, 0)
# player 0 plays raptor
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(2, game.players[0].minions[0].health)
self.assertEqual(5, len(game.players[1].hand))
game.play_single_turn()
game.play_single_turn()
# mortal coils the 2hp raptor
self.assertEqual(5, len(game.players[1].hand))
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].health)
game.play_single_turn()
game.play_single_turn()
# mortal coils the 1hp raptor and draws
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(6, len(game.players[1].hand))
def test_MortalCoilDivineShield(self):
game = generate_game_for(StonetuskBoar, MortalCoil, DoNothingAgent, OneCardPlayingAgent)
scarlet = ScarletCrusader()
scarlet.summon(game.players[0], game, 0)
# player 0 plays Scarlet Crusader
self.assertTrue(game.players[0].minions[0].divine_shield)
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].health)
self.assertEqual(5, len(game.players[1].hand))
game.play_single_turn()
game.play_single_turn()
# mortal coils the divine shield, no draw
self.assertFalse(game.players[0].minions[0].divine_shield)
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].health)
self.assertEqual(5, len(game.players[1].hand))
game.play_single_turn()
game.play_single_turn()
# mortal coils the 1hp scarlet crusader and draws
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(6, len(game.players[1].hand))
def test_FlameImp(self):
game = generate_game_for(FlameImp, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
game.play_single_turn()
# play Flame Imp, 3 damage to own hero
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(27, game.players[0].hero.health)
def test_PitLord(self):
game = generate_game_for(PitLord, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 7):
game.play_single_turn()
# play Pit Lord, 5 damage to own hero
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(25, game.players[0].hero.health)
def test_DreadInfernal(self):
game = generate_game_for(DreadInfernal, StonetuskBoar, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 10):
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(30, game.players[0].hero.health)
self.assertEqual(5, len(game.players[1].minions))
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn()
# Plays Dread Infernal, 1 damage to all
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(6, game.players[0].minions[0].health)
self.assertEqual(29, game.players[0].hero.health)
self.assertEqual(0, len(game.players[1].minions))
self.assertEqual(29, game.players[1].hero.health)
def test_Felguard(self):
game = generate_game_for(Felguard, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(2, game.players[0].max_mana)
game.play_single_turn()
# Plays Felguard, destroys mana crystal
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(2, game.players[0].max_mana)
def test_Succubus(self):
game = generate_game_for(Succubus, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(4, len(game.players[0].hand))
game.play_single_turn()
# Plays Succubus, discards
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(3, len(game.players[0].hand))
def test_Doomguard(self):
game = generate_game_for(Doomguard, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(7, len(game.players[0].hand))
game.play_single_turn()
# Plays Doomguard, discards twice
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(5, len(game.players[0].hand))
def test_Hellfire(self):
game = generate_game_for(Hellfire, SilverbackPatriarch, CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 6):
game.play_single_turn()
# plays 1 Silverback Patriarch
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(30, game.players[0].hero.health)
self.assertEqual(4, game.players[1].minions[0].health)
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn()
# Plays Hellfire, 3 damage to all
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].health)
self.assertEqual(27, game.players[0].hero.health)
self.assertEqual(27, game.players[1].hero.health)
def test_ShadowBolt(self):
game = generate_game_for(ShadowBolt, SilverbackPatriarch, CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 6):
game.play_single_turn()
# Plays Silverback Patriarch
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(30, game.players[0].hero.health)
self.assertEqual(4, game.players[1].minions[0].health)
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn()
# Uses Shadow Bolt
self.assertEqual(0, len(game.players[1].minions))
def test_DrainLife(self):
game = generate_game_for(DrainLife, MindBlast, EnemySpellTestingAgent, CardTestingAgent)
for turn in range(0, 4):
game.play_single_turn()
# Uses Mind Blast
self.assertEqual(25, game.players[0].hero.health)
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn()
# Uses Drain Life
self.assertEqual(27, game.players[0].hero.health)
self.assertEqual(28, game.players[1].hero.health)
def test_Soulfire(self):
game = generate_game_for(Soulfire, StonetuskBoar, EnemySpellTestingAgent, DoNothingAgent)
game.play_single_turn()
self.assertEqual(2, len(game.players[0].hand))
self.assertEqual(26, game.players[1].hero.health)
game.play_single_turn()
game.play_single_turn()
# It should play 2 copies of Soulfire at the enemy hero and discard the remaining copy
self.assertEqual(0, len(game.players[0].hand))
self.assertEqual(18, game.players[1].hero.health)
self.assertEqual(30, game.players[0].hero.health)
def test_TwistingNether(self):
game = generate_game_for(TwistingNether, SilverbackPatriarch, CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 14):
game.play_single_turn()
# Plays Silverback Patriarch each turn
self.assertEqual(5, len(game.players[1].minions))
game.play_single_turn()
# Plays Twisting Nether
self.assertEqual(0, len(game.players[1].minions))
def test_DemonfireEnemy(self):
game = generate_game_for(Demonfire, FlameImp, EnemyMinionSpellTestingAgent, OneCardPlayingAgent)
for turn in range(0, 2):
game.play_single_turn()
# play Flame Imp, 3 damage to own hero
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(27, game.players[1].hero.health)
game.play_single_turn()
# Demonfire to kill enemy Flame Imp
self.assertEqual(0, len(game.players[1].minions))
def test_DemonfireAlly(self):
game = generate_game_for(Demonfire, StonetuskBoar, CardTestingAgent, DoNothingAgent)
imp = FlameImp()
imp.summon(game.players[0], game, 0)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(30, game.players[0].hero.health)  # summon doesn't trigger battlecry
game.play_single_turn()
# Demonfire to buff own Flame Imp
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(4, game.players[0].minions[0].health)
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
def test_DemonfireAllyNonDemon(self):
game = generate_game_for(Demonfire, StonetuskBoar, CardTestingAgent, DoNothingAgent)
raptor = BloodfenRaptor()
raptor.summon(game.players[0], game, 0)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(30, game.players[0].hero.health)
game.play_single_turn()
# Demonfire to kill own Raptor
self.assertEqual(0, len(game.players[0].minions))
def test_SacrificialPact(self):
game = generate_game_for(MindBlast, SacrificialPact, CardTestingAgent, CardTestingAgent)
for turn in range(0, 3):
game.play_single_turn()
# Uses 1 Mindblast
self.assertEqual(25, game.players[1].hero.health)
imp = FlameImp()
imp.summon(game.players[0], game, 0)
game.play_single_turn()
# Pact the Imp
self.assertEqual(30, game.players[1].hero.health)
def test_SiphonSoul(self):
game = generate_game_for(MindBlast, SiphonSoul, OneCardPlayingAgent, CardTestingAgent)
for turn in range(0, 11):
game.play_single_turn()
# Uses Mindblast for 5 turns
self.assertEqual(5, game.players[1].hero.health)
boar = StonetuskBoar()
boar.summon(game.players[0], game, 0)
game.play_single_turn()
# Siphon Soul on the Boar
self.assertEqual(8, game.players[1].hero.health)
def test_SenseDemons(self):
game = generate_game_for([SenseDemons, Doomguard], StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(5, len(game.players[0].hand))
game.play_single_turn()
# plays Sense Demons and draws 2 Doomguards
self.assertEqual(7, len(game.players[0].hand))
self.assertEqual('Doomguard', game.players[0].hand[5].name)
self.assertEqual('Doomguard', game.players[0].hand[6].name)
for turn in range(0, 4):
game.play_single_turn()
def test_SenseDemonsNoDemons(self):
game = generate_game_for(SenseDemons, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(5, len(game.players[0].hand))
game.play_single_turn()
# plays Sense Demons and draws 2 Worthless Imps
self.assertEqual(7, len(game.players[0].hand))
self.assertEqual('Worthless Imp', game.players[0].hand[5].name)
self.assertEqual('Worthless Imp', game.players[0].hand[6].name)
game.play_single_turn()
game.play_single_turn()
# Sense Demons again
self.assertEqual(9, len(game.players[0].hand))
game.play_single_turn()
game.play_single_turn()
# Sense Demons again
self.assertEqual(10, len(game.players[0].hand))
self.assertEqual(0, len(game.players[0].minions))
for turn in range(0, 4):
game.play_single_turn()
# Play 3 copies of Sense Demons and then 2 copies of Worthless Imp
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual("Worthless Imp", game.players[0].minions[0].card.name)
self.assertEqual("Worthless Imp", game.players[0].minions[1].card.name)
def test_SenseDemonsOverflow(self):
game = generate_game_for([SenseDemons, SenseDemons, SenseDemons, FlameImp, FlameImp], Wisp,
OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(5, len(game.players[0].hand))
# Sense Demons to draw 2 Flame Imps
game.play_single_turn()
self.assertEqual(7, len(game.players[0].hand))
# Sense Demons to draw 2 more Flame Imps
game.play_single_turn()
game.play_single_turn()
self.assertEqual(9, len(game.players[0].hand))
# Sense Demons to draw 2 more Flame Imps, but 1 gets destroyed
game.play_single_turn()
game.play_single_turn()
self.assertEqual(10, len(game.players[0].hand))
def test_BaneOfDoom(self):
game = generate_game_for(BaneOfDoom, StonetuskBoar, EnemyMinionSpellTestingAgent, DoNothingAgent)
imp = FlameImp()
imp.summon(game.players[1], game, 0)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
game.play_single_turn()
# Kills enemy Imp with Bane of Doom and summons random demon
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(0, len(game.players[1].minions))
self.assertEqual(MINION_TYPE.DEMON, game.players[0].minions[0].card.minion_type)
# Banes the Mogushan but does not kill it
mogu = MogushanWarden()
mogu.summon(game.players[1], game, 0)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(5, game.players[1].minions[0].health)
def test_Corruption(self):
game = generate_game_for(Corruption, StonetuskBoar, EnemyMinionSpellTestingAgent, DoNothingAgent)
imp = FlameImp()
imp.summon(game.players[1], game, 0)
self.assertEqual(1, len(game.players[1].minions))
game.play_single_turn()
# Casts Corruption on enemy Imp
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(3, len(game.players[0].hand))
game.play_single_turn()
# Enemy minion still alive until start of my turn
self.assertEqual(1, len(game.players[1].minions))
game.play_single_turn()
# Corruption resolves at start of my turn, no targets to use remaining cards on
self.assertEqual(0, len(game.players[1].minions))
self.assertEqual(4, len(game.players[0].hand))
def test_PowerOverwhelming(self):
game = generate_game_for(PowerOverwhelming, StonetuskBoar, CardTestingAgent, DoNothingAgent)
imp = FlameImp()
imp.summon(game.players[0], game, 0)
self.assertEqual(1, len(game.players[0].minions))
def verify_poweroverwhelming():
self.assertEqual(7, game.players[0].minions[0].calculate_attack())
self.assertEqual(6, game.players[0].minions[0].health)
game.players[0].minions[0].bind("health_changed", verify_poweroverwhelming)
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(3, len(game.players[0].hand))
def test_Shadowflame(self):
game = generate_game_for(Shadowflame, Shieldbearer, CardTestingAgent, OneCardPlayingAgent)
imp = FlameImp()
imp.summon(game.players[0], game, 0)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(3, len(game.players[1].minions))
self.assertEqual(4, game.players[1].minions[0].health)
self.assertEqual(4, game.players[1].minions[1].health)
self.assertEqual(4, game.players[1].minions[2].health)
self.assertEqual(30, game.players[0].hero.health)
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn()
# Uses Shadowflame on own Flame Imp
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(3, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].health)
self.assertEqual(1, game.players[1].minions[1].health)
self.assertEqual(1, game.players[1].minions[2].health)
self.assertEqual(30, game.players[0].hero.health)
self.assertEqual(30, game.players[1].hero.health)
def test_SummoningPortal(self):
game = generate_game_for([SummoningPortal, Wisp], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 7):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual('Wisp', game.players[0].hand[0].name)
self.assertEqual(0, game.players[0].hand[0].mana_cost())
def test_SummoningPortal_Mechwarper(self):
game = generate_game_for([SummoningPortal, Mechwarper, SpiderTank], StonetuskBoar,
OneCardPlayingAgent, DoNothingAgent)
for turn in range(9):
game.play_single_turn()
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual(0, game.current_player.hand[0].mana_cost())
def test_BloodImp(self):
game = generate_game_for(BloodImp, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].stealth)
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(2, game.players[0].minions[0].health)
self.assertTrue(game.players[0].minions[0].stealth)
self.assertEqual(2, game.players[0].minions[1].health)
self.assertTrue(game.players[0].minions[1].stealth)
def test_Jaraxxus(self):
game = generate_game_for(LordJaraxxus, StonetuskBoar, PlayAndAttackAgent, DoNothingAgent)
for turn in range(0, 17):
game.play_single_turn()
self.assertEqual(0, len(game.current_player.minions))
self.assertEqual(15, game.current_player.hero.health)
self.assertEqual(15, game.current_player.hero.calculate_max_health())
self.assertEqual(27, game.other_player.hero.health)
game.current_player.agent = PredictableAgent()
game.play_single_turn()
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(6, game.current_player.minions[0].calculate_attack())
self.assertEqual(6, game.current_player.minions[0].calculate_max_health())
self.assertEqual(8, game.current_player.mana)
self.assertEqual(24, game.other_player.hero.health)
def test_Jaraxxus_with_secrets(self):
class SecretTester(DoNothingAgent):
def __init__(self):
super().__init__()
self.turn = 0
def do_turn(self, player):
self.turn += 1
if self.turn >= 8:
player.game.play_card(player.hand[0])
game = generate_game_for(LordJaraxxus, [Repentance, Snipe, MirrorEntity], CardTestingAgent, SecretTester)
for turn in range(0, 17):
game.play_single_turn()
# Jaraxxus should be played, Repentance should activate, leaving
# the hero with one health and one max health
# See http://www.reddit.com/r/hearthstone/comments/218vsu/jaraxxus_and_sword_of_justice_rule_inconsistency_o/
self.assertEqual(1, game.current_player.hero.health)
self.assertEqual(1, game.current_player.hero.calculate_max_health())
game.play_single_turn()
game.play_single_turn()
self.assertEqual(11, game.current_player.hero.health)
self.assertEqual(15, game.current_player.hero.calculate_max_health())
game.play_single_turn()
game.play_single_turn()
self.assertEqual(15, game.current_player.hero.health)
self.assertEqual(15, game.current_player.hero.calculate_max_health())
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual("Lord Jaraxxus", game.other_player.minions[0].card.name)
def test_Jaraxxus_Repentance_Snipe(self):
game = generate_game_for(LordJaraxxus, [Repentance, Snipe], OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(17):
game.play_single_turn()
# Based on https://www.youtube.com/watch?v=n8u2Senk_XU
self.assertEqual(0, len(game.current_player.minions))
self.assertEqual(0, game.current_player.hero.health)
self.assertTrue(game.current_player.hero.dead)
self.assertTrue(game.game_ended)
def test_Jaraxxus_with_SacrificialPact(self):
game = generate_game_for(LordJaraxxus, SacrificialPact, CardTestingAgent, CardTestingAgent)
for turn in range(0, 18):
game.play_single_turn()
# Sacrificial pact will target Jaraxxus, killing him instantly
# See http://www.hearthhead.com/card=163/sacrificial-pact#comments:id=1889015
self.assertTrue(game.other_player.hero.dead)
self.assertTrue(game.game_ended)
def test_VoidTerror(self):
game = generate_game_for([StonetuskBoar, StonetuskBoar, VoidTerror], StonetuskBoar,
OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 4):
game.play_single_turn()
def _choose_index(card, player):
return 1
game.players[0].agent.choose_index = _choose_index
self.assertEqual(2, len(game.players[0].minions))
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertEqual(5, game.players[0].minions[0].health)
def test_Voidcaller(self):
game = generate_game_for(Assassinate, [Voidcaller, FlameImp, ArgentSquire, BoulderfistOgre, StonetuskBoar],
CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Voidcaller", game.current_player.minions[0].card.name)
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual("Flame Imp", game.other_player.minions[0].card.name)
def testAnimaGolem(self):
game = generate_game_for([Loatheb, AnimaGolem, TwistingNether, AnimaGolem], StonetuskBoar,
OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 10):
game.play_single_turn()
# Loatheb should be played
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual("Loatheb", game.other_player.minions[0].card.name)
self.assertEqual(5, len(game.current_player.minions))
# The golem should be played, and live past the end of its turn, because of Loatheb
game.play_single_turn()
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual("Anima Golem", game.current_player.minions[0].card.name)
game.play_single_turn()
game.play_single_turn()
game.play_single_turn()
# Twisting Nether is played, removing everything from the board
game.play_single_turn()
game.play_single_turn()
self.assertEqual(0, len(game.other_player.minions))
# The golem is played alone, which results in its death at the end of the turn
game.play_single_turn()
self.assertEqual(0, len(game.current_player.minions))
def test_Darkbomb(self):
game = generate_game_for(Darkbomb, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(27, game.players[1].hero.health)
def test_DemonheartEnemy(self):
game = generate_game_for(Demonheart, FlameImp, EnemyMinionSpellTestingAgent, OneCardPlayingAgent)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(4, len(game.players[1].minions))
game.play_single_turn()
# Demonheart to kill enemy Flame Imp
self.assertEqual(3, len(game.players[1].minions))
def test_DemonheartAlly(self):
game = generate_game_for(Demonheart, StonetuskBoar, CardTestingAgent, DoNothingAgent)
imp = FlameImp()
imp.summon(game.players[0], game, 0)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
game.play_single_turn()
# Demonheart to buff own Flame Imp
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(8, game.players[0].minions[0].calculate_attack())
self.assertEqual(7, game.players[0].minions[0].health)
def test_DemonheartAllyNonDemon(self):
game = generate_game_for(Demonheart, StonetuskBoar, CardTestingAgent, DoNothingAgent)
raptor = BloodfenRaptor()
raptor.summon(game.players[0], game, 0)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
game.play_single_turn()
# Demonheart to kill own Raptor
self.assertEqual(0, len(game.players[0].minions))
def test_FelCannon(self):
game = generate_game_for([FelCannon, BoulderfistOgre], [BloodfenRaptor, HarvestGolem, Deathwing],
OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(6):
game.play_single_turn()
self.assertEqual(2, len(game.current_player.minions))
# Fel Cannon should target the Bloodfen Raptor
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual("Harvest Golem", game.other_player.minions[0].card.name)
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(5, game.current_player.minions[0].health)
game.play_single_turn()
game.play_single_turn()
# Fel Cannon should target nothing
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual("Harvest Golem", game.other_player.minions[0].card.name)
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(5, game.current_player.minions[0].health)
game.play_single_turn()
game.play_single_turn()
# Fel Cannon should target ogre
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual("Harvest Golem", game.other_player.minions[0].card.name)
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual(5, game.current_player.minions[0].health)
self.assertEqual(5, game.current_player.minions[1].health)
def test_ImpLosion(self):
game = generate_game_for([Implosion, OgreMagi], [SpiderTank, Whirlwind],
OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 7):
game.play_single_turn()
# Rolls 4, killing Spider
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual(0, len(game.players[1].minions))
# Whirlwind clears, Ogre, Spider, Implosion
game.play_single_turn()
game.play_single_turn()
game.play_single_turn()
game.play_single_turn()
# Rolls 3 + 1 spell damage, killing Spider
self.assertEqual(5, len(game.players[0].minions))
self.assertEqual(0, len(game.players[1].minions))
# Whirlwind clears except Ogre, Ogre, Spider, Implosion
game.play_single_turn()
game.play_single_turn()
game.play_single_turn()
game.play_single_turn()
# Rolls 2 + 2 spell damage, rip Spider
self.assertEqual(6, len(game.players[0].minions))
self.assertEqual(0, len(game.players[1].minions))
def test_MalGanis(self):
game = generate_game_for([FlameImp, MalGanis], FlameImp, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(17):
game.play_single_turn()
self.assertEqual(2, len(game.current_player.minions))
self.assertTrue(game.current_player.hero.immune)
self.assertFalse(game.other_player.hero.immune)
self.assertEqual(5, game.current_player.minions[1].calculate_attack())
self.assertEqual(4, game.current_player.minions[1].calculate_max_health())
self.assertEqual(9, game.current_player.minions[0].calculate_attack())
self.assertEqual(7, game.current_player.minions[0].calculate_max_health())
for minion in game.other_player.minions:
self.assertEqual(3, minion.calculate_attack())
self.assertEqual(2, minion.calculate_max_health())
def test_FloatingWatcher(self):
game = generate_game_for(FloatingWatcher, Hellfire, HeroPowerAndCardPlayingAgent, OneCardPlayingAgent)
for turn in range(13):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(9, game.current_player.hero.health)
self.assertEqual(4, game.current_player.minions[0].calculate_attack())
self.assertEqual(4, game.current_player.minions[0].calculate_max_health())
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(6, game.other_player.hero.health)
self.assertEqual(4, game.other_player.minions[0].calculate_attack())
self.assertEqual(4, game.other_player.minions[0].calculate_max_health())
game.play_single_turn()
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual(4, game.current_player.hero.health)
self.assertEqual(4, game.current_player.minions[0].calculate_attack())
self.assertEqual(4, game.current_player.minions[0].calculate_max_health())
self.assertEqual(6, game.current_player.minions[1].calculate_attack())
self.assertEqual(6, game.current_player.minions[1].calculate_max_health())
def test_MistressOfPain(self):
game = generate_game_for([MistressOfPain, AbusiveSergeant], SinisterStrike,
PlayAndAttackAgent, OneCardPlayingAgent)
for turn in range(5):
game.play_single_turn()
self.assertEqual(27, game.current_player.hero.health)
def test_MistressOfPain_Auchenai(self):
game = generate_game_for([MistressOfPain, AuchenaiSoulpriest], SinisterStrike,
PlayAndAttackAgent, OneCardPlayingAgent)
for turn in range(7):
game.play_single_turn()
self.assertEqual(0, game.current_player.hero.health)
self.assertTrue(game.current_player.hero.dead)
def test_MistressOfPain_DivineShield(self):
game = generate_game_for(MistressOfPain, [SinisterStrike, ArgentSquire],
PlayAndAttackAgent, OneCardPlayingAgent)
for turn in range(5):
game.play_single_turn()
# The mistress of pain does no damage, so the hero isn't healed
# See https://www.youtube.com/watch?v=wakqQSBjDdE
self.assertEqual(27, game.current_player.hero.health)
def test_MistressOfPain_saves_hero(self):
# based on http://youtu.be/_Z2ZU-cIoG8?t=1m3s
game = generate_game_for([MistressOfPain, Demonfire], Misdirection, PlayAndAttackAgent, OneCardPlayingAgent)
for turn in range(4):
game.play_single_turn()
game.players[0].hero.health = 2
game.play_single_turn()
self.assertFalse(game.game_ended)
def test_ImpGangBoss(self):
game = generate_game_for(ImpGangBoss, [MortalCoil, ShadowWordPain], OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(5):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
game.play_single_turn() # Mortal Coils the gang boss
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual("Imp", game.players[0].minions[1].card.name)
game.play_single_turn() # Plays 2nd Gang Boss
game.play_single_turn() # Pains 1 Boss, no damage
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual("Imp Gang Boss", game.players[0].minions[0].card.name)
self.assertEqual("Imp", game.players[0].minions[1].card.name)
def test_Demonwrath(self):
game = generate_game_for(Demonwrath, [Voidwalker, RiverCrocolisk], OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(5):
game.play_single_turn()
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].health) # Beast damaged
self.assertEqual(3, game.players[1].minions[1].health) # Voidwalker undamaged
def test_FistOfJaraxxus(self):
game = generate_game_for([Succubus, FistOfJaraxxus, FistOfJaraxxus, FistOfJaraxxus, FistOfJaraxxus],
StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(3):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(26, game.other_player.hero.health)
game.play_single_turn()
game.play_single_turn()
game.play_single_turn()
game.play_single_turn() # Fists of Jaraxxus will be played, targeting the enemy hero
self.assertEqual(22, game.other_player.hero.health)
|
jomyhuang/sdwle
|
testsHB/card_tests/warlock_tests.py
|
Python
|
mit
| 35,057
|
[
"BLAST",
"CRYSTAL"
] |
c7cd6929eb98eba186672c4497425e9bd2635fb331054e29e6988aa5adca71f6
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
This ao2mo module is kept for backward compatiblity. It's recommended to use
pyscf.pbc.df module to get 2e MO integrals
'''
import numpy as np
from pyscf.pbc import df
from pyscf import lib
from pyscf.pbc.dft.gen_grid import gen_uniform_grids
from pyscf.pbc.dft.numint import eval_ao
from pyscf.pbc import tools
from pyscf.lib import logger
einsum = lib.einsum
def general(cell, mo_coeffs, kpts=None, compact=False):
'''pyscf-style wrapper to get MO 2-el integrals.'''
if kpts is not None:
assert len(kpts) == 4
return get_mo_eri(cell, mo_coeffs, kpts)
#return df.FFTDF(cell).ao2mo(mo_coeffs, kpts, compact)
def get_mo_eri(cell, mo_coeffs, kpts=None):
'''Convenience function to return MO 2-el integrals.'''
mo_coeff12 = mo_coeffs[:2]
mo_coeff34 = mo_coeffs[2:]
if kpts is None:
kpts12 = kpts34 = q = None
else:
kpts12 = kpts[:2]
kpts34 = kpts[2:]
q = kpts12[0] - kpts12[1]
#q = kpts34[1] - kpts34[0]
if q is None:
q = np.zeros(3)
mo_pairs12_kG = get_mo_pairs_G(cell, mo_coeff12, kpts12)
mo_pairs34_invkG = get_mo_pairs_invG(cell, mo_coeff34, kpts34, q)
return assemble_eri(cell, mo_pairs12_kG, mo_pairs34_invkG, q)
def get_mo_pairs_G(cell, mo_coeffs, kpts=None, q=None):
'''Calculate forward (G|ij) FFT of all MO pairs.
TODO: - Implement simplifications for real orbitals.
Args:
mo_coeff: length-2 list of (nao,nmo) ndarrays
The two sets of MO coefficients to use in calculating the
product |ij).
Returns:
mo_pairs_G : (ngrids, nmoi*nmoj) ndarray
The FFT of the real-space MO pairs.
'''
coords = gen_uniform_grids(cell)
if kpts is None:
q = np.zeros(3)
aoR = eval_ao(cell, coords)
ngrids = aoR.shape[0]
if np.array_equal(mo_coeffs[0], mo_coeffs[1]):
nmoi = nmoj = mo_coeffs[0].shape[1]
moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
else:
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])
else:
if q is None:
q = kpts[1]-kpts[0]
aoR_ki = eval_ao(cell, coords, kpt=kpts[0])
aoR_kj = eval_ao(cell, coords, kpt=kpts[1])
ngrids = aoR_ki.shape[0]
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])
#mo_pairs_R = einsum('ri,rj->rij', np.conj(moiR), mojR)
mo_pairs_G = np.zeros([ngrids,nmoi*nmoj], np.complex128)
expmikr = np.exp(-1j*np.dot(q,coords.T))
    for i in range(nmoi):
        for j in range(nmoj):
mo_pairs_R_ij = np.conj(moiR[:,i])*mojR[:,j]
mo_pairs_G[:,i*nmoj+j] = tools.fftk(mo_pairs_R_ij, cell.mesh,
expmikr)
return mo_pairs_G
def get_mo_pairs_invG(cell, mo_coeffs, kpts=None, q=None):
'''Calculate "inverse" (ij|G) FFT of all MO pairs.
TODO: - Implement simplifications for real orbitals.
Args:
mo_coeff: length-2 list of (nao,nmo) ndarrays
The two sets of MO coefficients to use in calculating the
product |ij).
Returns:
mo_pairs_invG : (ngrids, nmoi*nmoj) ndarray
The inverse FFTs of the real-space MO pairs.
'''
coords = gen_uniform_grids(cell)
if kpts is None:
q = np.zeros(3)
aoR = eval_ao(cell, coords)
ngrids = aoR.shape[0]
if np.array_equal(mo_coeffs[0], mo_coeffs[1]):
nmoi = nmoj = mo_coeffs[0].shape[1]
moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
else:
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])
else:
if q is None:
q = kpts[1]-kpts[0]
aoR_ki = eval_ao(cell, coords, kpt=kpts[0])
aoR_kj = eval_ao(cell, coords, kpt=kpts[1])
ngrids = aoR_ki.shape[0]
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])
#mo_pairs_R = einsum('ri,rj->rij', np.conj(moiR), mojR)
mo_pairs_invG = np.zeros([ngrids,nmoi*nmoj], np.complex128)
expmikr = np.exp(-1j*np.dot(q,coords.T))
    for i in range(nmoi):
        for j in range(nmoj):
mo_pairs_R_ij = np.conj(moiR[:,i])*mojR[:,j]
mo_pairs_invG[:,i*nmoj+j] = np.conj(tools.fftk(np.conj(mo_pairs_R_ij),
cell.mesh, expmikr.conj()))
return mo_pairs_invG
def assemble_eri(cell, orb_pair_invG1, orb_pair_G2, q=None, verbose=logger.INFO):
'''Assemble 4-index electron repulsion integrals.
Returns:
(nmo1*nmo2, nmo3*nmo4) ndarray
'''
log = logger.Logger
if isinstance(verbose, logger.Logger):
log = verbose
else:
log = logger.Logger(cell.stdout, verbose)
log.debug('Performing periodic ERI assembly of (%i, %i) ij,kl pairs',
orb_pair_invG1.shape[1], orb_pair_G2.shape[1])
if q is None:
q = np.zeros(3)
coulqG = tools.get_coulG(cell, -1.0*q)
ngrids = orb_pair_invG1.shape[0]
Jorb_pair_G2 = np.einsum('g,gn->gn',coulqG,orb_pair_G2)*(cell.vol/ngrids**2)
eri = np.dot(orb_pair_invG1.T, Jorb_pair_G2)
return eri
#def get_mo_eri(cell, mo_coeffs, kpts=None):
# '''Convenience function to return MO 2-el integrals.'''
# return general(cell, mo_coeffs, kpts)
#def get_mo_pairs_G(cell, mo_coeffs, kpts=None):
# '''Calculate forward (G|ij) FFT of all MO pairs.
#
# TODO: - Implement simplifications for real orbitals.
#
# Args:
# mo_coeff: length-2 list of (nao,nmo) ndarrays
# The two sets of MO coefficients to use in calculating the
# product |ij).
#
# Returns:
# mo_pairs_G : (ngrids, nmoi*nmoj) ndarray
# The FFT of the real-space MO pairs.
# '''
# return df.FFTDF(cell).get_mo_pairs(mo_coeffs, kpts)
#
#def get_mo_pairs_invG(cell, mo_coeffs, kpts=None):
# '''Calculate "inverse" (ij|G) FFT of all MO pairs.
#
# TODO: - Implement simplifications for real orbitals.
#
# Args:
# mo_coeff: length-2 list of (nao,nmo) ndarrays
# The two sets of MO coefficients to use in calculating the
# product |ij).
#
# Returns:
# mo_pairs_invG : (ngrids, nmoi*nmoj) ndarray
# The inverse FFTs of the real-space MO pairs.
# '''
# if kpts is None: kpts = numpy.zeros((2,3))
# mo_pairs_G = df.FFTDF(cell).get_mo_pairs((mo_coeffs[1],mo_coeffs[0]),
# (kpts[1],kpts[0]))
# nmo0 = mo_coeffs[0].shape[1]
# nmo1 = mo_coeffs[1].shape[1]
# mo_pairs_invG = mo_pairs_G.T.reshape(nmo1,nmo0,-1).transpose(1,0,2).conj()
# mo_pairs_invG = mo_pairs_invG.reshape(nmo0*nmo1,-1).T
# return mo_pairs_invG
def get_ao_pairs_G(cell, kpts=None):
'''Calculate forward (G|ij) and "inverse" (ij|G) FFT of all AO pairs.
Args:
cell : instance of :class:`Cell`
Returns:
ao_pairs_G, ao_pairs_invG : (ngrids, nao*(nao+1)/2) ndarray
The FFTs of the real-space AO pairs.
'''
return df.FFTDF(cell).get_ao_pairs(kpts)
def get_ao_eri(cell, kpts=None):
'''Convenience function to return AO 2-el integrals.'''
if kpts is not None:
assert len(kpts) == 4
return df.FFTDF(cell).get_eri(kpts)
if __name__ == '__main__':
from pyscf.pbc import gto as pgto
cell = pgto.Cell()
cell.atom = 'He 1. .5 .5; He .1 1.3 2.1'
cell.basis = 'ccpvdz'
cell.a = np.eye(3) * 4.
cell.mesh = [11]*3
cell.build()
print(get_ao_eri(cell).shape)
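    # Illustrative extension of the demo (an assumption, not in the original
    # file): with identity "MO" coefficients the MO integrals returned by
    # general() reduce to the AO integrals, exercising the FFT-based MO path.
    nao = cell.nao_nr()
    mo = np.eye(nao)
    print(general(cell, (mo, mo, mo, mo)).shape)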
|
gkc1000/pyscf
|
pyscf/pbc/ao2mo/eris.py
|
Python
|
apache-2.0
| 8,686
|
[
"PySCF"
] |
d0a125bb931454254d54c62583cc2ad86be3437f7429c18c6786cdf69afa0f04
|
#!/usr/bin/env python
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
## Create an incomplete Amber topology from a PDB.
## The parm file will have the same atom content as the PDB -
## i.e. it will have missing atoms and cannot be used for simulation.
##
## last $Author$
## last $Date$
import sys
from Biskit import AmberParmBuilder, PDBModel, LogFile
import Biskit.tools as t
def _use( options ):
print """
Create amber topology and coordinate files from a PDB.
Syntax: am_pdb2parm.py -i |PDBfile| [-o |out.parm| ...any of options below ]
OR: am_pdb2parm.py -i |PDBfile| -mirror [-o |out.parm| ]
Result: out.parm, out.crd, out.pdb, (and leap.log in current folder)
Special option -mirror: create a parm for exact atom content of input PDB
(no S-S bonds formed, atoms missing from residues..)
This parm can be used for ptraj but not for simulations!
Options:
ocrd - str, target file for crd (coordinates) [|f_out_base|.crd]
opdb - str, target file for pdb [|f_out_base|.pdb]
hetatm - keep hetero atoms [don't]
cap - put ACE and NME capping residue on chain breaks [don't]
capN - int int, indices of chains that should get ACE cap []
capC - int int, indices of chains that should get NME cap []
box - float, minimal distance of solute from box edge [10.0]
nocenter - do *not* re-center the input coordinates [center]
fmod - str str, list of files with amber parameter modifications
(to be loaded into leap with loadAmberParams) []
fprep - str str, list of files with amber residue definitions
(to be loaded into leap with loadAmberPrep) []
leap_template - str, path to template file for leap input [use default]
leaprc - str, path to parameter file for leap [use default]
leap_out - str, target file for leap.log [default: discard]
leap_in - str, target file for leap.in script [default: discard]
leap_pdb - str, target file for cleaned input pdb [discard]
leap_bin - str, path to tleap binary [use default]
norun - do not run leap, only prepare files
debug - keep all temporary files
more -key value pairs for place holders in leap input template
Comments:
- The protonation state of histidines is decided from the H-atoms that
are found (HD, HE, or both). After that all H are removed to be added again
by leap.
- Cleaning tries to convert non-standard residues to the closest standard one.
- Cleaning removes non-standard atoms (and atoms following them) from standard
residues.
- Cleaning keeps the largest / first of multiple occupancies
- Ends of chains are assumed if the residue numbering jumps backward, if there
is a TER record or chain ID or segid change, or if there is a chain break.
- A chain break is assumed if there is an untypical gap in the chain of back-
bone atoms (see PDBModel.chainBreaks() ).
- The index of the first chain is 0.
- Original waters are deleted.
- As usual, options can also be put into a file and loaded with the -x option
Default options:
"""
for key, value in options.items():
print "\t-",key, "\t",value
#sys.exit(0)
options = t.cmdDict( {'o':'out.parm'} )
try:
f_out = options['o']
if 'ocrd' in options:
options['f_out_crd'] = options['ocrd']
if 'opdb' in options:
options['f_out_pdb'] = options['opdb']
if 'box' in options:
options['box'] = float( options['box'] )
options['cap'] = 'cap' in options
options['capN']= t.toIntList( options.get('capN',[]))
options['capC']= t.toIntList( options.get('capC',[]))
options['hetatm'] = 'hetatm' in options
options['norun'] = 'norun' in options
options['debug'] = 'debug' in options
options['center'] = not 'nocenter' in options
if 'log' in options:
options['log'] = LogFile( options['log'] )
if 'norun' in options:
fbase = t.stripSuffix( t.absfile( options['i'] ) )
options['leap_in'] = options.get('leap_in', fbase+'_leap.in')
options['leap_pdb']= options.get('leap_pdb',fbase+'_forleap.pdb')
a = AmberParmBuilder( options['i'], **options )
del options['debug']
if not 'mirror' in options:
a.parmSolvated( f_out, **options )
else:
a.parmMirror( f_out, **options )
except KeyError, why:
_use( options )
except Exception, why:
print "There was an error..."
print t.lastError()
print t.lastErrorTrace()
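## Example invocations (illustrative, not from the original script):
##   am_pdb2parm.py -i complex.pdb -o complex.parm -hetatm -box 12.0
##   am_pdb2parm.py -i complex.pdb -mirror -o complex_mirror.parm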
|
ostrokach/biskit
|
scripts/Biskit/amber_pdb2parm.py
|
Python
|
gpl-3.0
| 5,523
|
[
"Amber"
] |
b5343cbb5dbee2835a4815add838ed44e1365c54dc1091b5b02b9c6202a57a42
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('events.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development: just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
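# Illustrative note (not part of the original config): with DEBUG enabled the
# error views wired up above can be previewed locally, e.g. run
#   python manage.py runserver
# and visit http://127.0.0.1:8000/404/ or http://127.0.0.1:8000/500/.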
|
brayan15/Events
|
config/urls.py
|
Python
|
mit
| 1,546
|
[
"VisIt"
] |
272145e725853e4dab501d225ede3f658c974ce4dceae8bc89807adc94292db4
|
from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from . import utils
import scipy as sp
from scipy import ndimage
from six.moves import zip
def plot_stars(p, x, y, size='large', horizontalalignment='center', **kwargs):
''' Plots significance stars '''
plt.text(x, y, utils.stars(p), size=size, horizontalalignment=horizontalalignment, **kwargs)
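# Illustrative usage (assumed, not part of the original module): annotate a
# two-bar comparison with the significance stars for p = 0.003, e.g.
#   fig, ax = plt.subplots()
#   ax.bar([0, 1], [0.4, 0.7])
#   plot_stars(0.003, x=0.5, y=0.75)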
def plot_linestar(p, x1, x2, y):
    plt.hlines(y, x1, x2)
    plot_stars(p, 0.5 * (x1 + x2), y + 0.02, size='large', horizontalalignment='center')
def _date_labels(dates):
months = np.array([mdt.month for mdt in dates])
months_idx = np.append([True], months[:-1] != months[1:])
strfs = np.array(['%d', '%b %d'])[months_idx.astype(int)]
return [dt.strftime(strf) for dt, strf in zip(dates, strfs)]
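# For example (illustrative): _date_labels([date(2020, 1, 30), date(2020, 1, 31),
# date(2020, 2, 1)]) returns ['Jan 30', '31', 'Feb 01'] -- the month name is
# shown only for the first label and whenever the month changes.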
def plot_filtered_performance_calendar(subj, df, num_days=7, **kwargs):
'''
plots a calendar view of the performance for a subject on the past num_days.
'''
df2 = utils.filter_normal_trials(utils.filter_recent_days(df, num_days))
return plot_performance_calendar(subj, df2, **kwargs)
def plot_performance_calendar(subj, data_to_analyze, disp_counts=False, vmins=(0, 0, 0), vmaxs=(None, 1, None)):
'''
plots a calendar view of performance for a subject.
Plots all trials from data_to_analyze so make sure it is filtered.
Parameters:
-----------
subj : str
the subject
data_to_analyze : pandas DataFrame
filtered data to plot. Can be a slice, a copy is made anyways.
disp_counts : boolean
display values in grid, removes colorbars, default False
vmins, vmaxs : iterable of floats, length 3, optional
Values to anchor the colormaps. If None, they are inferred from the data.
'''
data_to_analyze = data_to_analyze.copy()
data_to_analyze['date'] = data_to_analyze.index.date
data_to_analyze['hour'] = data_to_analyze.index.hour
blocked = data_to_analyze.groupby(['date', 'hour'])
aggregated = pd.DataFrame(blocked.agg({'correct': lambda x: np.mean(x.astype(float)),
'reward': lambda x: np.sum(x.astype(float)),
'type_': lambda x: np.sum((x == 'normal').astype(float))}).to_records())
f, ax = plt.subplots(1, 3, sharex=True, sharey=True, figsize=(16.0, 4.0))
columns = ('type_', 'correct', 'reward')
titles = (subj + ': Trials per hour', 'Accuracy', 'Feeds')
cmaps = [plt.get_cmap(cmap) for cmap in ('Oranges', 'RdYlBu', 'BuGn')]
for cmap in cmaps:
cmap.set_bad(color='Grey')
pivoted = aggregated.pivot('hour', 'date')
for i, (column, title, cmap, vmin, vmax) in enumerate(zip(columns, titles, cmaps, vmins, vmaxs)):
g = sns.heatmap(pivoted[column], annot=disp_counts, ax=ax[i],
cmap=cmap, cbar=not disp_counts,
vmin=vmin, vmax=vmax)
g.set_title(title)
        date_columns = list(pivoted[column].keys())
        g.set_xticklabels(_date_labels(date_columns))
return f
def plot_filtered_accperstim(title, df, num_days=7, **kwargs):
'''
plots accuracy per stim for a subject on the past num_days.
'''
return plot_accperstim(title, utils.filter_normal_trials(utils.filter_recent_days(df, num_days)), **kwargs)
def plot_accperstim(title, data_to_analyze, stim_ids='stimulus', stims_all=None, label_count_cutoff=50, extract_stim_names=True):
'''
percent correct broken out by stimulus and day.
Parameters:
-----------
title : str
the plot title
data_to_analyze : pandas DataFrame
filtered data to plot. Can be a slice, a copy is made anyways.
stim_ids : str
label of the column to group-by.
stims_all : None or list-like
order of stims. must match values in stim_ids
label_count_cutoff : int
max number of stimuli labels. If below this value will sort stim_ids by class.
extract_stim_names : boolean
whether to extract stimuli names from full stimuli paths. If true, ignores stim_ids.
'''
data_to_analyze = data_to_analyze.copy()
if extract_stim_names:
stim_ids = 'stim_name'
utils.extract_filename(data_to_analyze, target=stim_ids)
data_to_analyze['date'] = data_to_analyze.index.date
blocked = data_to_analyze.groupby(['date', stim_ids])
aggregated = pd.DataFrame(blocked.agg(
{'correct': lambda x: np.mean(x.astype(float))}).to_records())
pivoted = aggregated.pivot(stim_ids, 'date', 'correct')
if stims_all:
yticklabels = stims_all
elif len(pivoted) < label_count_cutoff:
yticklabels = data_to_analyze.groupby(
['class_', stim_ids]).index.unique().index.get_level_values(stim_ids).values
else:
yticklabels = int(len(pivoted) / label_count_cutoff)
cmap = sns.diverging_palette(15, 250, as_cmap=True)
cmap.set_bad(color='k', alpha=0.5)
plt.figure()
g = sns.heatmap(pivoted, vmin=0, vmax=1, cmap=cmap,
                    xticklabels=_date_labels(list(pivoted.keys())),
yticklabels=yticklabels)
g.set_title(title)
return g
def plot_daily_accuracy(subj, df, x_axis='trial_num', smoothing='gaussian', day_lim=0):
'''
plots the accuracy of the subject throughout the day.
a preset for the more general plot_accuracy_bias
Parameters:
-----------
subj : str
the subject
df : pandas DataFrame
data frame of behavior data
x_axis : str
whether to plot 'time' or 'trial_num' along the x axis
smoothing : str
        whether to smooth using an 'exponential' average, a 'rolling' average,
        or a 'gaussian' filter
day_lim : None or non-negative int
max number of days of trials to include. Zero means just today.
'''
return plot_accuracy_bias(subj, df, x_axis=x_axis, smoothing=smoothing, trial_lim=None, day_lim=day_lim,
plt_correct_smoothed=True, plt_correct_shade=True, plt_correct_line=True,
plt_L_response_smoothed=False, plt_L_response_shade=False, plt_L_response_line=False,
plt_R_response_smoothed=False, plt_R_response_shade=False, plt_R_response_line=False,
plt_ci=False, block_size=100)
def plot_ci_accuracy(subj, df, x_axis='time', day_lim=7, trial_lim=None, bias=True):
'''
plots the accuracy (and bias) of the subject throughout the day.
a preset for the more general plot_accuracy_bias
Parameters:
-----------
subj : str
the subject
df : pandas DataFrame
data frame of behavior data
x_axis : str
whether to plot 'time' or 'trial_num' along the x axis
trial_lim : None or int
max number of most recent trials to include
day_lim : None or non-negative int
max number of days of trials to include. Zero means just today.
bias : boolean
whether to plot the line for the left bias
'''
    return plot_accuracy_bias(subj, df, x_axis=x_axis, smoothing='rolling', trial_lim=trial_lim, day_lim=day_lim,
plt_correct_smoothed=True, plt_correct_shade=False, plt_correct_line=False,
plt_L_response_smoothed=bias, plt_L_response_shade=False, plt_L_response_line=False,
plt_R_response_smoothed=False, plt_R_response_shade=False, plt_R_response_line=False,
plt_ci=True, block_size=100)
def plot_accuracy_bias(subj, df, x_axis='time', smoothing='exponential', trial_lim=None, day_lim=7,
plt_correct_smoothed=True, plt_correct_shade=True, plt_correct_line=True,
plt_L_response_smoothed=False, plt_L_response_shade=False, plt_L_response_line=False,
plt_R_response_smoothed=False, plt_R_response_shade=False, plt_R_response_line=False,
plt_ci=False, block_size=100):
'''
plots the accuracy or bias of the subject.
Parameters:
-----------
subj : str
the subject
df : pandas DataFrame
data frame of behavior data
x_axis : str
whether to plot 'time' or 'trial_num' along the x axis
smoothing : str
        whether to smooth using an 'exponential' average, a 'rolling' average,
        or a 'gaussian' filter
trial_lim : None or int
max number of most recent trials to include
day_lim : None or non-negative int
max number of days of trials to include. Zero means just today.
plt_{correct, L_response, R_response}_smoothed : boolean
whether to plot a smoothed line for the value
plt_{correct, L_response, R_response}_shade : boolean
whether to plot a red shaded region filling in the line of actual responses
plt_{correct, L_response, R_response}_line : boolean
whether to plot a red line of the actual responses
'''
fig = plt.figure(figsize=(16, 2))
if trial_lim is not None:
df = df[-trial_lim:]
if day_lim is not None:
df = utils.filter_recent_days(df, day_lim)
df = utils.filter_normal_trials(df)
if x_axis == 'time':
x = df.index._mpl_repr()
use_index = True
elif x_axis == 'trial_num':
x = np.arange(len(df))
use_index = False
else:
raise Exception('invalid value for x_axis')
datas = (df['correct'].astype(float), df['response'].isin(['L','left']), df['response'].isin(['R','right']))
plot_smoothed_mask = (plt_correct_smoothed, plt_L_response_smoothed, plt_R_response_smoothed)
plot_shaded_mask = (plt_correct_shade, plt_L_response_shade, plt_R_response_shade)
plot_line_mask = (plt_correct_line, plt_L_response_line, plt_R_response_line)
for data, smoothed, shaded, line in zip(datas, plot_smoothed_mask, plot_shaded_mask, plot_line_mask):
if shaded:
plt.fill_between(x, .5, data.values.astype(bool), color='r', alpha=.25)
if line:
g = data.plot(color='r', marker='o', linewidth=.5, use_index=use_index)
if smoothed:
if smoothing == 'exponential':
data.ewm(halflife=20).mean().plot(use_index=use_index)
elif smoothing == 'gaussian':
plt.plot(x, ndimage.filters.gaussian_filter(
data.values.astype('float32'), 3, order=0))
elif smoothing == 'rolling':
data.rolling(window=block_size, center=True).mean().plot(use_index=use_index)
else:
raise Exception('invalid value for smoothing')
if plt_ci and smoothing == 'rolling':
ci = utils.binomial_ci(0.5 * block_size, block_size)
plt.axhspan(ci[0], ci[1], color='grey', alpha=0.5)
plt.axhline(y=.5, c='black', linestyle='dotted')
plt.title('Today\'s Performance: ' + subj)
plt.xlabel(x_axis)
return fig
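# Minimal usage sketch for the accuracy presets above (not part of the original module).
# It assumes the behavior DataFrame is indexed by trial timestamp and carries at least
# 'correct', 'response', 'reward' and 'type_' columns, the shape normally produced by
# loading.load_data_pandas; the subject name and the data below are made up.
def _example_plot_daily_accuracy():
    import numpy as np
    import pandas as pd
    n_trials = 200
    # use today's date so the day_lim=0 filter inside plot_daily_accuracy keeps the data
    start = pd.Timestamp.now().normalize() + pd.Timedelta(hours=9)
    idx = pd.date_range(start, periods=n_trials, freq='min')
    demo = pd.DataFrame({'correct': np.random.rand(n_trials) > 0.4,
                         'response': np.random.choice(['L', 'R'], size=n_trials),
                         'reward': np.random.rand(n_trials) > 0.6,
                         'type_': 'normal'}, index=idx)
    return plot_daily_accuracy('B999', demo, x_axis='trial_num', smoothing='gaussian')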
def plot_trial_feeds(behav_data, num_days=7):
'''
    plots the number of trials and the number of feeds for all birds across time
Parameters:
-----------
behav_data : dict of pandas dataframes
from loading.load_data_pandas
num_days : non-negative int
number of days to include data for
'''
colors = sns.hls_palette(len(behav_data))
fig = plt.figure(figsize=(16.0, 4.0))
ax1 = fig.gca()
ax2 = ax1.twinx()
for (subj, df), color in zip(list(behav_data.items()), colors):
data_to_analyze = utils.filter_recent_days(df, num_days).copy()
if not data_to_analyze.empty:
data_to_analyze['date'] = data_to_analyze.index.date
blocked = data_to_analyze.groupby('date')
days = np.sort(list(blocked.groups.keys()))
trials_per_day = blocked['response'].count().values
line = ax1.plot(days, trials_per_day, label=subj + ' trials per day', c=color)
if len(days) == 1:
                ax1.plot(0, trials_per_day[-1], 'o', c=color)
aggregated = blocked.agg({'reward': lambda x: np.sum((x == True).astype(float))})
aggregated['reward'].plot(ax=ax2, label=subj + ' feeds per day', ls='--', c=color)
if len(days) == 1:
ax2.plot(0, aggregated['reward'][0], 'o', c=color)
plt.title('trials and feeds per day')
for ax, label, loc in zip((ax1, ax2), ('trials per day', 'feeds per day'), ('upper left', 'upper right')):
ax.set_ylabel(label)
ax.set_ylim(bottom=0)
ax.legend(loc=loc)
ax1.set_xticklabels(_date_labels(days))
return fig
|
gentnerlab/behav-analysis
|
behav/plotting.py
|
Python
|
bsd-3-clause
| 12,668
|
[
"Gaussian"
] |
216d0b69b8cc1647d5e61b1a762fdb8dcbd9822e3372002ccf1b36d7dcb4a8bc
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import *
import time
import numpy as np
from scipy import signal
import audiolazy
class Simulation(object):
"""The Simulation object can be used if a real experiment is not available.
:param model: An lti model used to simulate the temperature output.
:param sampling: The sampling time.
:param heater_resistance: A resistance in ohm used to calculate the voltage
drop.
    :param sigma: The standard deviation of the Gaussian noise added to the simulated temperature.
E.g.::
import heatcapacity as hc
sampling_time = 0.1
sim = hc.Simulation(
hc.FirstOrder.from_ck(0.004, 0.002),
sampling=sampling_time,
heater_resistance=1e3,
sigma=1.
)
        pulse_sequence = [0.] * int(60 / sampling_time) + [0.001] * int(60 / sampling_time)
measurement = hc.PulsMeasurement(
currentsource=sim, powermeter=sim, thermometer=sim,
pulse=pulse_sequence, sampling_time=sampling_time)
measurement.start()
"""
    def __init__(self, model, sampling, heater_resistance, sigma):
self.heater_resistance = heater_resistance
self.power = audiolazy.ControlStream(0.)
self.current = 0.
# convert model to discrete representation
num, den, dt = signal.cont2discrete((model.num, model.den), sampling)
model = audiolazy.ZFilter(list(num.flatten()), list(den.flatten()))
self.model = model(self.power) + audiolazy.gauss_noise(sigma=sigma)
@property
def current(self):
return self._current
@current.setter
def current(self, value):
self._current = value
self.power.value = self.voltage * self.current
@property
def voltage(self):
return self.heater_resistance * self.current
@property
def temperature(self):
"""Simulates the temperature response to the current change."""
return self.model.take()
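# Illustrative check (not part of the original module): the same scipy.signal.cont2discrete
# call used in Simulation.__init__, applied to a hypothetical first-order transfer function
# 1/(s + 1) with a 0.1 s sample time. It only demonstrates the (numerator, denominator, dt)
# tuple that __init__ unpacks before building the ZFilter.
def _example_discretise_first_order(sampling=0.1):
    numd, dend, dt = signal.cont2discrete(([1.0], [1.0, 1.0]), sampling)
    return numd, dend, dt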
|
p3trus/heatcapacity
|
heatcapacity/simulation.py
|
Python
|
bsd-3-clause
| 2,158
|
[
"Gaussian"
] |
9fd8f4be8de21cd3281037e35637c9231d6cd1d4261f54517e601de16b9f337b
|
# -*- coding: utf-8 -*-
u"""
==================================
Input and output (:mod:`scipy.io`)
==================================
.. currentmodule:: scipy.io
SciPy has many modules, classes, and functions available to read data
from and write data to a variety of file formats.
.. seealso:: `NumPy IO routines <https://www.numpy.org/devdocs/reference/routines.io.html>`__
MATLAB® files
=============
.. autosummary::
:toctree: generated/
loadmat - Read a MATLAB style mat file (version 4 through 7.1)
savemat - Write a MATLAB style mat file (version 4 through 7.1)
whosmat - List contents of a MATLAB style mat file (version 4 through 7.1)
IDL® files
==========
.. autosummary::
:toctree: generated/
readsav - Read an IDL 'save' file
Matrix Market files
===================
.. autosummary::
:toctree: generated/
mminfo - Query matrix info from Matrix Market formatted file
mmread - Read matrix from Matrix Market formatted file
mmwrite - Write matrix to Matrix Market formatted file
Unformatted Fortran files
===============================
.. autosummary::
:toctree: generated/
FortranFile - A file object for unformatted sequential Fortran files
Netcdf
======
.. autosummary::
:toctree: generated/
netcdf_file - A file object for NetCDF data
netcdf_variable - A data object for the netcdf module
Harwell-Boeing files
====================
.. autosummary::
:toctree: generated/
hb_read -- read H-B file
hb_write -- write H-B file
Wav sound files (:mod:`scipy.io.wavfile`)
=========================================
.. module:: scipy.io.wavfile
.. autosummary::
:toctree: generated/
read
write
WavFileWarning
Arff files (:mod:`scipy.io.arff`)
=================================
.. module:: scipy.io.arff
.. autosummary::
:toctree: generated/
loadarff
MetaData
ArffError
ParseArffError
"""
from __future__ import division, print_function, absolute_import
# matfile read and write
from .matlab import loadmat, savemat, whosmat, byteordercodes
# netCDF file support
from .netcdf import netcdf_file, netcdf_variable
# Fortran file support
from ._fortran import FortranFile
from .mmio import mminfo, mmread, mmwrite
from .idl import readsav
from .harwell_boeing import hb_read, hb_write
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
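# Illustrative usage (not part of scipy itself): a savemat/loadmat round trip. The file
# name and array are arbitrary; loadmat also returns '__header__', '__version__' and
# '__globals__' metadata keys alongside the saved variables.
#
#   from scipy.io import savemat, loadmat
#   import numpy as np
#   savemat('demo.mat', {'a': np.arange(6).reshape(2, 3)})
#   assert (loadmat('demo.mat')['a'] == np.arange(6).reshape(2, 3)).all()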
|
lhilt/scipy
|
scipy/io/__init__.py
|
Python
|
bsd-3-clause
| 2,453
|
[
"NetCDF"
] |
1ebc76aea26241cd7ac1bd1dca1dbe87ef8317a1ca02d15e6ecffcc357c81a8b
|
from unittest import skipIf
from openmoltools import utils, amber, packmol, gromacs
from distutils.spawn import find_executable
import os
def test_gromacs_merge():
etoh_filename = utils.get_data_filename("chemicals/etoh/etoh.mol2")
benzene_filename = utils.get_data_filename("chemicals/benzene/benzene.mol2")
with utils.enter_temp_directory(): #Prevents creating lots of tleap/antechamber files everywhere
#Generate frcmod files, mol2 files
gaff_mol2_filename1, frcmod_filename1 = amber.run_antechamber( "etoh", etoh_filename, charge_method = None)
gaff_mol2_filename2, frcmod_filename2 = amber.run_antechamber( "benzene", benzene_filename, charge_method = None)
#Set file names
prmtop_filename1 = "./out1.prmtop"
prmtop_filename2 = "./out2.prmtop"
crd_filename1 = "./out1.inpcrd"
crd_filename2 = "./out2.inpcrd"
top_filename1 = "./out1.top"
top_filename2 = "./out2.top"
gro_filename1 = "./out1.gro"
gro_filename2 = "./out2.gro"
#Generate AMBER files
amber.run_tleap( 'etoh', gaff_mol2_filename1, frcmod_filename1, prmtop_filename1, crd_filename1 )
amber.run_tleap( 'benzene', gaff_mol2_filename2, frcmod_filename2, prmtop_filename2, crd_filename2 )
#Convert to GROMACS
utils.convert_via_acpype( "etoh", prmtop_filename1, crd_filename1, out_top = top_filename1, out_gro = gro_filename1 )
utils.convert_via_acpype( "benzene", prmtop_filename2, crd_filename2, out_top = top_filename2, out_gro = gro_filename2 )
#Merge topologies
gromacs.merge_topologies( [ top_filename1, top_filename2], './combined.top', 'combined', molecule_numbers = [1, 5], molecule_names = ['etoh', 'benzene'] )
#Test editing of molecule numbers in topology file
gromacs.change_molecules_section( './combined.top', './edited.top', ['etoh', 'benzene'], [10, 20] )
@skipIf(gromacs.GROMACS_PATH is None, "Skipping testing of GROMACS solvation because GROMACS not found.")
def test_gromacs_solvate():
etoh_filename = utils.get_data_filename("chemicals/etoh/etoh.mol2")
with utils.enter_temp_directory(): #Prevents creating lots of tleap/antechamber files everywhere
#Generate frcmod files, mol2 files
gaff_mol2_filename, frcmod_filename = amber.run_antechamber( "etoh", etoh_filename, charge_method = None)
#Amber setup
amber.run_tleap( 'etoh', gaff_mol2_filename, frcmod_filename, 'etoh.prmtop', 'etoh.crd' )
#GROMACS conversion
utils.convert_via_acpype( 'etoh', 'etoh.prmtop', 'etoh.crd', 'etoh.top', 'etoh.gro' )
#Solvate
gromacs.do_solvate( 'etoh.top', 'etoh.gro', 'etoh_solvated.top', 'etoh_solvated.gro', 1.2, 'dodecahedron', 'spc216', 'tip3p.itp' )
|
jchodera/openmoltools
|
openmoltools/tests/test_gromacs.py
|
Python
|
gpl-2.0
| 2,794
|
[
"Amber",
"Gromacs"
] |
f195e6738ca3a6d8b6f933c55423df6b8287c3aedca23dbad728eafa583ad858
|
"""
Custom nodes for a Tree Editor that provide views for adding various nodes
to the tree.
"""
# Authors: Judah De Paula <judah@enthought.com>
# Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import (HasTraits, Str, Property, Any, Button,
List, Instance, implements,
ToolbarButton)
from traitsui.api import View, Item, Group,\
TextEditor, TreeEditor, TreeNode, ListEditor, ITreeNode
from pyface.api import ImageResource
from pyface.resource.api import resource_path
# Local imports.
from .registry import registry
###############################################################################
# AdderNode class
###############################################################################
class AdderNode(TreeNode):
""" Base class that will display a TreeNode to add items to the tree.
"""
implements(ITreeNode)
# String to be shown in the TreeEditor.
label = Str('Base AdderNode')
# Default tooltip for this class.
tooltip = Str('Add an item')
# The parent object that should be manipulated for adding children.
object = Any
# Duck-typing is necessary since Mayavi assumes nodes always have scenes.
scene = Property
# Trait view to show in the Mayavi current object panel.
view = View(Group(label='AdderNode'))
def dialog_view(self):
""" View shown by double-clicking on the node. Same as in Base().
"""
view = self.trait_view()
view.buttons = [ ]
view.title = self.label
view.icon = ImageResource('add.ico')
view.resizable = True
view.width = 350
view.height = 650
return view
def _get_scene(self):
""" Trait Property getter for 'scene'.
"""
object = self.object
if isinstance(object, AdderNode):
return None
if object is not None:
return object.scene
else:
return None
#------------------------------------------------------------------------
# The ITreeNode interface needed by the Qt tree_editor
#------------------------------------------------------------------------
def get_label(self):
return self.label
def get_icon(self, obj, is_expanded=False):
return self.icon_name
def get_icon_path(self):
return resource_path()
def get_tooltip(self):
return self.tooltip
def allows_children(self):
return False
def get_children_id(self, node=None):
return []
def when_label_changed(self, label_updated, remove):
return
def when_column_labels_change(self, listener, remove):
return
###############################################################################
# SceneAdderNode class
###############################################################################
class SceneAdderNode(AdderNode):
""" Subclass for adding Scene nodes to a Mayavi Engine node.
"""
# String to be shown in the TreeEditor.
label = Str('Add a new scene')
# The name of the icon
icon_name = Str('add_scene.png')
# Button for the View.
add_scene = Button('Add a new scene',
image=ImageResource('add_scene.png'))
# Trait view to show in the Mayavi current object panel.
view = View(Group(Item('add_scene', show_label=False, style='custom'),
label='Add a scene'))
def _add_scene_fired(self):
""" Trait handler for when the add_scene button is clicked.
"""
self.object.new_scene()
###############################################################################
# DocumentedItem class
###############################################################################
class DocumentedItem(HasTraits):
""" Container to hold a name and a documentation for an action.
"""
# Name of the action
name = Str
# Button to trigger the action
add = ToolbarButton('Add', orientation='horizontal',
image=ImageResource('add.ico'))
# Object the action will apply on
object = Any
# Two lines documentation for the action
documentation = Str
view = View('_',
Item('add', style='custom', show_label=False),
Item('documentation', style='readonly',
editor=TextEditor(multi_line=True),
resizable=True,
show_label=False),
)
def _add_fired(self):
""" Trait handler for when the add_source button is clicked in
one of the sub objects in the list.
"""
action = getattr(self.object.menu_helper, self.id)
action()
def documented_item_factory(name='', documentation='',
id='', object=None):
""" Factory for creating a DocumentedItem with the right button
label.
"""
documentation = documentation.replace('\n', '')
documentation = documentation.replace(' ', '')
class MyDocumentedItem(DocumentedItem):
add = ToolbarButton('%s' % name, orientation='horizontal',
image=ImageResource('add.ico'))
return MyDocumentedItem(
name=name,
documentation=documentation,
id=id,
object=object)
###############################################################################
# ListAdderNode class
###############################################################################
class ListAdderNode(AdderNode):
""" A node for adding object, with a list of objects to add generated
from the registry.
"""
# The list of items to display to the user.
items_list = List(DocumentedItem)
# A reference to the registry, to generate this list.
items_list_source = List()
# Selected item
selected_item = Instance(DocumentedItem)
# A reference to self, to allow to build the tree view.
self = Instance(AdderNode)
# The icon of the displayed objects
icon_name = Str('add.ico')
def _self_default(self):
return self
def default_traits_view(self):
nodes = [TreeNode(node_for=[AdderNode],
label='name',
copy=False,
delete=False,
rename=False,
children='items_list',
),
TreeNode(node_for=[DocumentedItem],
label='name',
copy=False,
delete=False,
rename=False,
icon_item=self.icon_name,
),
]
tree_editor = TreeEditor(editable=False,
hide_root=True,
orientation='vertical',
selected='object.selected_item',
nodes=nodes,
on_dclick='object._on_tree_dclick',
)
view = View(Item('self',
show_label=False,
editor=tree_editor,
resizable=True,
springy=True,
height=0.5),
Item('selected_item', style='custom', show_label=False,
height=0.5),
resizable=True)
return view
def _object_changed(self, value):
""" Trait handler for when the self.object trait changes.
"""
result = []
if value is not None:
# Don't need 'x', but do need to generate the actions.
x = value.menu_helper.actions
for src in self.items_list_source:
if not self._is_action_suitable(value, src):
continue
name = src.menu_name.replace('&','')
result.append(
documented_item_factory(
name=name,
documentation=src.help,
id=src.id,
object=value)
)
self.items_list = result
def _is_action_suitable(self, object, src):
""" Check that the action described by src can be applied on the
given object.
"""
if hasattr(object.menu_helper, 'check_%s' % src.id) \
and getattr(object.menu_helper, 'check_%s' % src.id)():
return True
else:
return False
def _on_tree_dclick(self, object):
""" Called when an user double clicks on an item in the tree
view.
"""
object._add_fired()
###############################################################################
# SourceAdderNode class
###############################################################################
class SourceAdderNode(ListAdderNode):
""" Tree node that presents a view to the user to add a scene source.
"""
# Button for adding a data file, with automatic format checking.
open_file = ToolbarButton('Load data from file',
orientation='horizontal',
image=ImageResource('file.png'))
# A reference to the registry, to generate this list.
items_list_source = [source for source in registry.sources
if len(source.extensions) == 0]
# The string to display on the icon in the TreeEditor.
label = 'Add Data Source'
# The icon of the displayed objects
icon_name = Str('source.ico')
# Trait view to show in the Mayavi current object panel.
def default_traits_view(self):
return View(Group(Group(Item('open_file', style='custom'),
show_labels=False, show_border=False),
Item('items_list', style='readonly',
editor=ListEditor(style='custom')),
show_labels=False,
label='Add a data source'))
def _open_file_fired(self):
""" Trait handler for when the open_file button is clicked.
"""
self.object.menu_helper.open_file_action()
def _is_action_suitable(self, object, src):
return True
###############################################################################
# ModuleAdderNode class
###############################################################################
class ModuleAdderNode(ListAdderNode):
""" Tree node that presents a view to the user to add modules.
"""
# String to be shown in the TreeEditor.
label = Str('Add a visualization module')
# The icon of the displayed objects
icon_name = Str('module.ico')
# A reference to the registry, to generate this list.
items_list_source = registry.modules
def _object_changed(self, value):
if value is not None:
value.menu_helper._build_filter_actions()
ListAdderNode._object_changed(self, value)
###############################################################################
# FilterAdderNode class
###############################################################################
class FilterAdderNode(ListAdderNode):
""" Tree node that presents a view to the user to add filters.
"""
# String to be shown in the TreeEditor.
label = Str('Add a processing filter')
# The icon of the displayed objects
icon_name = Str('filter.ico')
# A reference to the registry, to generate this list.
items_list_source = registry.filters
###############################################################################
# ModuleFilterAdderNode class
###############################################################################
class ModuleFilterAdderNode(AdderNode):
""" Tree node that presents a view to the user to add filter and
modules.
"""
# The string to display on the icon in the TreeEditor.
label = 'Add module or filter'
# An adder node for modules
modules = Instance(ModuleAdderNode, ())
# An adder node for filters
filters = Instance(FilterAdderNode, ())
def _object_changed(self):
""" Propagate the object to the sub nodes.
"""
self.filters.object = self.object
self.modules.object = self.object
# Trait view to show in the Mayavi current object panel.
view = View(
Group(Item('modules', style='custom', springy=True,
resizable=True,
height=1.,
),
show_labels=False,
label='Visualization modules'),
Group(Item('filters', style='custom', springy=True,
resizable=True,
height=1.,
),
show_labels=False,
label='Processing filters'),
)
### EOF #######################################################################
|
liulion/mayavi
|
mayavi/core/adder_node.py
|
Python
|
bsd-3-clause
| 13,368
|
[
"Mayavi"
] |
059890a44c8d27506d787f7410c2fd0bbd79f418b71d11eae17167a22f540c04
|
# Copyright (C) 2008-2009 Open Society Institute
# Thomas Moroz: tmoroz@sorosny.org
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License Version 2 as published
# by the Free Software Foundation. You may not use, modify or distribute
# this program under any other version of the GNU General Public License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from datetime import datetime
import math
import os
from pprint import pformat
from zope.component import queryUtility
from repoze.bfg.interfaces import ISettings
from repoze.bfg.traversal import model_path
from repoze.bfg.traversal import find_interface
from repoze.folder.interfaces import IFolder
from repoze.lemonade.content import is_content
from karl.models.interfaces import ILetterManager
from karl.models.interfaces import ICommunity
from karl.models.interfaces import IProfile
from karl.models.peopledirectory import reindex_peopledirectory
from karl.utils import find_catalog
from karl.utils import find_peopledirectory
from karl.utils import find_peopledirectory_catalog
from karl.utils import find_profiles
from karl.utils import find_tags
from karl.utils import find_users
def postorder(startnode):
def visit(node):
if IFolder.providedBy(node):
for child in node.values():
for result in visit(child):
yield result
yield node
return visit(startnode)
def index_content(obj, event):
""" Index content (an IObjectAddedEvent subscriber) """
catalog = find_catalog(obj)
if catalog is not None:
for node in postorder(obj):
if is_content(obj):
path = model_path(node)
docid = getattr(node, 'docid', None)
if docid is None:
docid = node.docid = catalog.document_map.add(path)
else:
catalog.document_map.add(path, docid)
catalog.index_doc(docid, node)
def unindex_content(obj, docids):
""" Unindex given 'docids'.
"""
catalog = find_catalog(obj)
if catalog is not None:
for docid in docids:
catalog.unindex_doc(docid)
catalog.document_map.remove_docid(docid)
def cleanup_content_tags(obj, docids):
""" Remove any tags associated with 'docids'.
"""
tags = find_tags(obj)
if tags is not None:
for docid in docids:
tags.delete(item=docid)
def handle_content_removed(obj, event):
""" IObjectWillBeRemovedEvent subscriber.
"""
catalog = find_catalog(obj)
if catalog is not None:
path = model_path(obj)
num, docids = catalog.search(path={'query': path,
'include_path': True})
unindex_content(obj, docids)
cleanup_content_tags(obj, docids)
def reindex_content(obj, event):
""" Reindex a single piece of content (non-recursive); an
    IObjectModified event subscriber """
catalog = find_catalog(obj)
if catalog is not None:
path = model_path(obj)
docid = catalog.document_map.docid_for_address(path)
catalog.reindex_doc(docid, obj)
def set_modified(obj, event):
""" Set the modified date on a single piece of content
unconditionally (non-recursive); an IObjectModified event
subscriber"""
now = datetime.now()
obj.modified = now
_modify_community(obj, now)
def set_created(obj, event):
""" Add modified and created attributes to nodes which do not yet
have them (recursively); an IObjectWillBeAddedEvent subscriber"""
now = datetime.now()
for node in postorder(obj):
if is_content(obj):
if not getattr(node, 'modified', None):
node.modified = now
if not getattr(node, 'created', None):
node.created = now
parent = getattr(event, 'parent', None)
if parent is not None:
_modify_community(parent, now)
def _modify_community(obj, when):
# manage content_modified on community whenever a piece of content
# in a community is changed
community = find_interface(obj, ICommunity)
if community is not None:
community.content_modified = when
def delete_community(obj, event):
# delete the groups related to the community when a community is
# deleted
context = obj
users = find_users(context)
users.delete_group(context.members_group_name)
users.delete_group(context.moderators_group_name)
# manage alphabet ('title startswith') listing: optimization for letter links
def alpha_added(obj, event):
adapter = ILetterManager(obj)
adapter.delta(1)
def alpha_removed(obj, event):
adapter = ILetterManager(obj)
adapter.delta(-1)
# "Index" profile e-mails into the profiles folder.
def _remove_email(parent, name):
mapping = getattr(parent, 'email_to_name')
filtered = [x for x in mapping.items() if x[1] != name]
mapping.clear()
mapping.update(filtered)
def profile_added(obj, event):
parent = obj.__parent__
name = obj.__name__
_remove_email(parent, name)
parent.email_to_name[obj.email] = name
def profile_removed(obj, event):
parent = obj.__parent__
name = obj.__name__
_remove_email(parent, name)
def index_profile(obj, event):
""" Index profile (an IObjectAddedEvent subscriber) """
catalog = find_peopledirectory_catalog(obj)
if catalog is not None:
for node in postorder(obj):
if IProfile.providedBy(node):
path = model_path(node)
docid = getattr(node, 'docid', None)
if docid is None:
docid = node.docid = catalog.document_map.add(path)
else:
catalog.document_map.add(path, docid)
catalog.index_doc(docid, node)
def unindex_profile(obj, event):
""" Unindex profile (an IObjectWillBeRemovedEvent subscriber) """
catalog = find_peopledirectory_catalog(obj)
if catalog is not None:
path = model_path(obj)
path_docid = catalog.document_map.docid_for_address(path)
num, docids = catalog.search(path=path)
for docid in docids:
# unindex any children of the path first
catalog.unindex_doc(docid)
catalog.document_map.remove_docid(docid)
if path_docid is not None:
# and then finally the parent
catalog.unindex_doc(path_docid)
catalog.document_map.remove_docid(path_docid)
def reindex_profile(obj, event):
""" Reindex a single piece of profile (non-recursive); an
IObjectModifed event subscriber """
catalog = find_peopledirectory_catalog(obj)
if catalog is not None:
path = model_path(obj)
docid = catalog.document_map.docid_for_address(path)
catalog.unindex_doc(docid)
catalog.index_doc(docid, obj)
def reindex_profile_after_group_change(event):
""" Subscriber for group change events to reindex the profile
in peopledir catalog """
profiles = find_profiles(event.site)
profile = profiles.get(event.id)
if profile is not None:
catalog = find_peopledirectory_catalog(profile)
if catalog is not None:
path = model_path(profile)
docid = catalog.document_map.docid_for_address(path)
catalog.unindex_doc(docid)
catalog.index_doc(docid, profile)
def update_peopledirectory_indexes(event):
"""Updates the peopledir catalog schema.
This is an IPeopleDirectorySchemaChanged subscriber.
"""
peopledir = event.peopledir
if peopledir.update_indexes():
reindex_peopledirectory(peopledir)
class QueryLogger(object):
"""Event listener that logs ICatalogQueryEvents to a directory.
Divides the log files by the magnitude of the query duration,
making it easy to find expensive queries.
"""
def __init__(self):
self._configured = False
self.log_dir = None
self.min_duration = None
def configure(self, settings):
self.log_dir = getattr(settings, 'query_log_dir', None)
if self.log_dir:
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.min_duration = float(
getattr(settings, 'query_log_min_duration', 0.0))
self._configured = True
def __call__(self, event):
if not self._configured:
settings = queryUtility(ISettings)
if settings is not None:
self.configure(settings)
if not self.log_dir:
return
duration = event.duration
if duration < self.min_duration:
return
t = datetime.now().isoformat()
query = ' ' + pformat(event.query).replace('\n', '\n ')
msg = '%s %8.3f %6d\n%s\n' % (
t, duration, event.result[0], query)
magnitude = math.ceil(math.log(duration, 2))
fn = '%07d.log' % int(1000 * 2**magnitude)
path = os.path.join(self.log_dir, fn)
f = open(path, 'a')
try:
f.write(msg)
finally:
f.close()
log_query = QueryLogger()
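# Worked example (illustrative only) of the log-file bucketing done in QueryLogger.__call__:
# a query lasting 0.3 seconds falls in the 2**ceil(log2(0.3)) == 0.5 second bucket, so its
# entry is appended to '0000500.log' inside query_log_dir.
#
#   import math
#   duration = 0.3
#   magnitude = math.ceil(math.log(duration, 2))         # -1
#   filename = '%07d.log' % int(1000 * 2 ** magnitude)   # '0000500.log'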
|
boothead/karl
|
karl/models/subscribers.py
|
Python
|
gpl-2.0
| 9,622
|
[
"VisIt"
] |
9d563b9cdaa3e4d706d983139412f59f6e12b551d02f69a92fc79851a7194b31
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
from unittest import TestCase
from commoncode.codec import bin_to_num, num_to_bin
from commoncode.codec import _encode, to_base_n
from commoncode.codec import to_base10, to_base85
class TestCodec(TestCase):
def test_bin_to_num_basic(self):
expected = 123
result = bin_to_num('{')
assert expected == result
def test_bin_to_num_null(self):
expected = 0
result = bin_to_num('\x00')
assert expected == result
def test_bin_to_num_large_number(self):
expected = 432346237462348763
result = bin_to_num('\x06\x00\x00\x9c\xbf\xeb\x83\xdb')
assert expected == result
def test_bin_to_num_and_num_to_bin_is_idempotent(self):
expected = 432346237462348763
result = bin_to_num(num_to_bin(432346237462348763))
assert expected == result
def test_num_to_bin_basic(self):
expected = '{'
result = num_to_bin(123)
assert expected == result
def test_num_to_bin_null(self):
expected = ''
result = num_to_bin(0)
assert expected == result
def test_num_to_bin_large_number(self):
expected = '\x06\x00\x00\x9c\xbf\xeb\x83\xdb'
result = num_to_bin(432346237462348763)
assert expected == result
def test_num_to_bin_bin_to_num_is_idempotent(self):
expected = '\x06\x00\x00\x9c\xbf\xeb\x83\xdb'
result = num_to_bin(bin_to_num('\x06\x00\x00\x9c\xbf\xeb\x83\xdb'))
assert expected == result
def test_encode_zero(self):
assert '' == _encode(0)
def test_encode_basic(self):
assert 'HKq1w7M=' == _encode(123123123123)
def test_encode_limit_8bits_255(self):
assert '_w==' == _encode(255)
def test_encode_limit_8bits_256(self):
assert 'AQA=' == _encode(256)
def test_encode_adds_no_padding_for_number_that_are_multiple_of_6_bits(self):
assert '____________' == _encode(0xFFFFFFFFFFFFFFFFFF)
assert 8 == len(_encode(0xFFFFFFFFFFFF))
def test_encode_very_large_number(self):
b64 = ('QAAAAAAgAAAAAQAACAAAAAAAAAAAAAAkAAIAAAAAAAAAAAAAAACAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAiAAAAAAAIAAAAAAAAAAAAAAEAACAAAAAAAA=')
expected = b64
num = 2678771517966886466622496485850735537232223496190189203248435106535830319026141316924949516664780383591425235756710588949364368366679435700855700642969357960349427980681242720502045830438444033569999428606714388704082526548154984676817460705606960919023941301616034362869262429593297635158449513824256L
result = _encode(num)
assert expected == result
def test_base64_is_idempotent(self):
for i in [0, 63, 782963129, 99999999, 2147483647]:
assert i == to_base10(to_base_n(i, 64), 64)
def test_base36_is_idempotent(self):
for i in [0, 63, 782963129, 99999999, 2147483647]:
assert i == to_base10(to_base_n(i, 36), 36)
def test_base85_is_idempotent(self):
# we use this for 12-bit hashes
for i in [0, 63, 100, 1000, 4095, 4294967295]:
assert i == to_base10(to_base85(i), 85)
def test_to_base_n_with_unknown_base_raise_exception(self):
try:
to_base_n(892103712, 86)
except AssertionError:
pass
try:
to_base_n(892103712, 16522)
except AssertionError:
pass
try:
to_base_n(892103712, 1)
except AssertionError:
pass
|
retrography/scancode-toolkit
|
tests/commoncode/test_codec.py
|
Python
|
apache-2.0
| 4,920
|
[
"VisIt"
] |
82d73c037003f6a132c1c62665316510921620c931aa55faeac62abc88738e7b
|
""" VOMSService class encapsulates connection to the VOMS service for a given VO
"""
import requests
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Security.Locations import getProxyLocation, getCAsLocation
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOOption
from DIRAC.ConfigurationSystem.Client.Helpers.CSGlobals import getVO
class VOMSService(object):
def __init__(self, vo=None):
"""c'tor
:param str vo: name of the virtual organization (community)
"""
if vo is None:
vo = getVO()
if not vo:
raise Exception("No VO name given")
self.vo = vo
self.vomsVO = getVOOption(vo, "VOMSName")
if not self.vomsVO:
raise Exception("Can not get VOMS name for VO %s" % vo)
self.urls = []
result = gConfig.getSections("/Registry/VO/%s/VOMSServers" % self.vo)
if result["OK"]:
for server in result["Value"]:
gLogger.verbose("Adding 'https://%s:8443/voms/%s/apiv2/users'" % (server, self.vomsVO))
self.urls.append("https://%s:8443/voms/%s/apiv2/users" % (server, self.vomsVO))
else:
gLogger.error("Section '/Registry/VO/%s/VOMSServers' not found" % self.vo)
self.userDict = None
def attGetUserNickname(self, dn, _ca=None):
"""Get user nickname for a given DN if any
:param str dn: user DN
:param str _ca: CA, kept for backward compatibility
:return: S_OK with Value: nickname
"""
if self.userDict is None:
result = self.getUsers()
if not result["OK"]:
return result
uDict = self.userDict.get(dn)
if not uDict:
return S_ERROR(DErrno.EVOMS, "No nickname defined")
nickname = uDict.get("nickname")
if not nickname:
return S_ERROR(DErrno.EVOMS, "No nickname defined")
return S_OK(nickname)
def getUsers(self):
"""Get all the users of the VOMS VO with their detailed information
:return: user dictionary keyed by the user DN
"""
if not self.urls:
return S_ERROR(DErrno.ENOAUTH, "No VOMS server defined")
userProxy = getProxyLocation()
caPath = getCAsLocation()
rawUserList = []
result = None
for url in self.urls:
rawUserList = []
startIndex = 0
result = None
error = None
urlDone = False
while not urlDone:
try:
result = requests.get(
url,
headers={"X-VOMS-CSRF-GUARD": "y"},
cert=userProxy,
verify=caPath,
params={"startIndex": str(startIndex), "pageSize": "100"},
)
except requests.ConnectionError as exc:
error = "%s:%s" % (url, repr(exc))
urlDone = True
continue
if result.status_code != 200:
error = "Failed to contact the VOMS server: %s" % result.text
urlDone = True
continue
userList = result.json()["result"]
rawUserList.extend(userList)
if len(userList) < 100:
urlDone = True
startIndex += 100
# This URL did not work, try another one
if error:
continue
else:
break
if error:
return S_ERROR(DErrno.ENOAUTH, "Failed to contact the VOMS server: %s" % error)
# We have got the user info, reformat it
resultDict = {}
for user in rawUserList:
for cert in user["certificates"]:
dn = cert["subjectString"]
resultDict[dn] = user
resultDict[dn]["CA"] = cert["issuerString"]
resultDict[dn]["certSuspended"] = cert.get("suspended")
resultDict[dn]["suspended"] = user.get("suspended")
resultDict[dn]["mail"] = user.get("emailAddress")
resultDict[dn]["Roles"] = user.get("fqans")
attributes = user.get("attributes")
if attributes:
for attribute in user.get("attributes", []):
if attribute.get("name") == "nickname":
resultDict[dn]["nickname"] = attribute.get("value")
self.userDict = dict(resultDict)
return S_OK(resultDict)
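# Illustrative usage sketch (not part of the original module). It assumes a configured
# DIRAC installation with a valid proxy, CA certificates, and a
# /Registry/VO/<vo>/VOMSServers section in the configuration; the VO name below is a
# placeholder.
#
#   voms = VOMSService(vo='myvo')
#   result = voms.getUsers()
#   if result['OK']:
#       for dn, info in result['Value'].items():
#           print(dn, info.get('nickname'), info.get('mail'))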
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Security/VOMSService.py
|
Python
|
gpl-3.0
| 4,693
|
[
"DIRAC"
] |
f75197066ed8705bf36bb03b210c0cb78bfca636087fd6e0a5c58d65167c79a2
|
# -*- coding: utf-8 -*-
###############################################################################
#
# This source file is part of the tomviz project.
#
# Copyright Kitware, Inc.
#
# This source code is released under the New BSD License, (the "License").
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import numpy as np
import scipy.io
from tomviz.io import FileType, IOBase, Reader
import tomviz.utils
from vtk import vtkImageData
class MatlabBase(IOBase):
@staticmethod
def file_type():
return FileType('MATLAB binary format', ['mat'])
class MatlabReader(Reader, MatlabBase):
def read(self, path):
mat_dict = scipy.io.loadmat(path)
data = None
for item in mat_dict.values():
# Assume only one 3D array per file
if isinstance(item, np.ndarray):
if len(item.shape) == 3:
data = item
break
if data is None:
return vtkImageData()
image_data = vtkImageData()
(x, y, z) = data.shape
image_data.SetOrigin(0, 0, 0)
image_data.SetSpacing(1, 1, 1)
image_data.SetExtent(0, x - 1, 0, y - 1, 0, z - 1)
tomviz.utils.set_array(image_data, data)
return image_data
|
mathturtle/tomviz
|
tomviz/python/tomviz/io/formats/matlab.py
|
Python
|
bsd-3-clause
| 1,632
|
[
"VTK"
] |
c4b351a0149130cc3ec20b622c9d3ecdf7a71504b84552355329fb8f5e83c2fc
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2011 Sebastian Pölsterl
#
# Permission is granted to copy, distribute and/or modify this document
# under the terms of the GNU Free Documentation License, Version 1.3
# or any later version published by the Free Software Foundation;
# with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
import sys
sys.path.insert(0, '../..')
import pgi
pgi.install_as_gi()
from gi.repository import Gtk
class LinkButtonWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="LinkButton Demo")
self.set_border_width(10)
button = Gtk.LinkButton("http://www.gtk.org", "Visit GTK+ Homepage")
self.add(button)
win = LinkButtonWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
pwaller/pgi
|
examples/gtk/linkbutton_example.py
|
Python
|
lgpl-2.1
| 820
|
[
"VisIt"
] |
6ecc6b47ee22d643f0867e89e9398cc1394be8b3796cf2a69504073f31710b85
|
#-------------------------------------------------------------------------------
# Copyright (c) 2011 Anton Golubkov.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v2.1
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#
# Contributors:
# Anton Golubkov - initial API and implementation
#-------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import ipfdicttype
import cv
class IPFAdaptiveThresholdMethod(ipfdicttype.IPFDictType):
""" Adaptive Threshold Method type dict
"""
name = "IPFAdaptiveThresholdMethod"
dictionary = {"Mean" : cv.CV_ADAPTIVE_THRESH_MEAN_C,
"Gaussian" : cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
}
def __init__(self):
pass
@classmethod
def default_value(cls):
""" Return default value for this type """
return cls.dictionary["Mean"]
|
anton-golubkov/Garland
|
src/ipf/ipftype/ipfadaptivethresholdmethod.py
|
Python
|
lgpl-2.1
| 1,073
|
[
"Gaussian"
] |
44dbc30df7441a11c64c77679698d8bd80b1114fabf41017f5d5310d140b90f8
|
# -*- coding: utf-8 -*-
"""This file contains a plugin for parsing Google Analytics cookies."""
from urllib import parse as urlparse
from dfdatetime import posix_time as dfdatetime_posix_time
from dfdatetime import semantic_time as dfdatetime_semantic_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers.cookie_plugins import interface
from plaso.parsers.cookie_plugins import manager
# TODO: determine if __utmc is always 0?
class GoogleAnalyticsEventData(events.EventData):
"""Google Analytics event data.
Attributes:
cookie_name (str): name of cookie.
domain_hash (str): domain hash.
pages_viewed (int): number of pages viewed.
sessions (int): number of sessions.
sources (int): number of sources.
url (str): URL or path where the cookie got set.
visitor_id (str): visitor identifier.
"""
DATA_TYPE = 'cookie:google:analytics'
def __init__(self, cookie_identifier):
"""Initializes event data.
Args:
cookie_identifier (str): unique identifier of the cookie.
"""
data_type = '{0:s}:{1:s}'.format(self.DATA_TYPE, cookie_identifier)
super(GoogleAnalyticsEventData, self).__init__(data_type=data_type)
self.cookie_name = None
self.domain_hash = None
self.pages_viewed = None
self.sessions = None
self.sources = None
self.url = None
self.visitor_id = None
class GoogleAnalyticsUtmaPlugin(interface.BaseCookiePlugin):
"""A browser cookie plugin for __utma Google Analytics cookies.
The structure of the cookie data:
<domain hash>.<visitor ID>.<first visit>.<previous visit>.<last visit>.
<number of sessions>
For example:
137167072.1215918423.1383170166.1383170166.1383170166.1
Or:
<last visit>
For example:
13113225820000000
"""
NAME = 'google_analytics_utma'
DATA_FORMAT = 'Google Analytics __utma cookie'
COOKIE_NAME = '__utma'
def _ParseCookieData(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
"""Extracts events from cookie data.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (str): cookie data.
url (str): URL or path where the cookie got set.
"""
fields = cookie_data.split('.')
number_of_fields = len(fields)
if number_of_fields not in (1, 6):
parser_mediator.ProduceExtractionWarning(
'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
number_of_fields, self.COOKIE_NAME))
return
if number_of_fields == 1:
domain_hash = None
visitor_identifier = None
first_visit_posix_time = None
previous_visit_posix_time = None
try:
# TODO: fix that we're losing precision here use dfdatetime.
last_visit_posix_time = int(fields[0], 10) / 10000000
except ValueError:
last_visit_posix_time = None
number_of_sessions = None
elif number_of_fields == 6:
domain_hash = fields[0]
visitor_identifier = fields[1]
# TODO: Double check this time is stored in UTC and not local time.
try:
first_visit_posix_time = int(fields[2], 10)
except ValueError:
first_visit_posix_time = None
try:
previous_visit_posix_time = int(fields[3], 10)
except ValueError:
previous_visit_posix_time = None
try:
last_visit_posix_time = int(fields[4], 10)
except ValueError:
last_visit_posix_time = None
try:
number_of_sessions = int(fields[5], 10)
except ValueError:
number_of_sessions = None
event_data = GoogleAnalyticsEventData('utma')
event_data.cookie_name = self.COOKIE_NAME
event_data.domain_hash = domain_hash
event_data.sessions = number_of_sessions
event_data.url = url
event_data.visitor_id = visitor_identifier
if first_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=first_visit_posix_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Analytics Creation Time')
parser_mediator.ProduceEventWithEventData(event, event_data)
if previous_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=previous_visit_posix_time)
event = time_events.DateTimeValuesEvent(
date_time, 'Analytics Previous Time')
parser_mediator.ProduceEventWithEventData(event, event_data)
date_time = None
if last_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=last_visit_posix_time)
timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
elif first_visit_posix_time is None and previous_visit_posix_time is None:
# If both creation_time and written_time are None produce an event
# object without a timestamp.
date_time = dfdatetime_semantic_time.NotSet()
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
if date_time is not None:
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
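# Worked example (illustrative only): splitting the six-field __utma sample from the
# GoogleAnalyticsUtmaPlugin docstring shows how _ParseCookieData maps the fields onto
# the produced event data.
#
#   fields = '137167072.1215918423.1383170166.1383170166.1383170166.1'.split('.')
#   # fields[0] -> domain hash, fields[1] -> visitor identifier,
#   # fields[2], fields[3], fields[4] -> first/previous/last visit POSIX timestamps,
#   # fields[5] -> number of sessions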
class GoogleAnalyticsUtmbPlugin(interface.BaseCookiePlugin):
"""A browser cookie plugin for __utmb Google Analytics cookies.
The structure of the cookie data:
<domain hash>.<pages viewed>.<unknown>.<last time>
For example:
137167072.1.10.1383170166
173272373.6.8.1440489514899
173272373.4.9.1373300660574
Or:
<last time>
For example:
13113225820000000
"""
NAME = 'google_analytics_utmb'
DATA_FORMAT = 'Google Analytics __utmb cookie'
COOKIE_NAME = '__utmb'
def _ParseCookieData(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
"""Extracts events from cookie data.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set.
"""
fields = cookie_data.split('.')
number_of_fields = len(fields)
if number_of_fields not in (1, 4):
parser_mediator.ProduceExtractionWarning(
'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
number_of_fields, self.COOKIE_NAME))
return
if number_of_fields == 1:
domain_hash = None
try:
# TODO: fix that we're losing precision here use dfdatetime.
last_visit_posix_time = int(fields[0], 10) / 10000000
except ValueError:
last_visit_posix_time = None
number_of_pages_viewed = None
elif number_of_fields == 4:
domain_hash = fields[0]
try:
number_of_pages_viewed = int(fields[1], 10)
except ValueError:
number_of_pages_viewed = None
try:
if fields[2] in ('8', '9'):
# TODO: fix that we're losing precision here use dfdatetime.
last_visit_posix_time = int(fields[3], 10) / 1000
else:
last_visit_posix_time = int(fields[3], 10)
except ValueError:
last_visit_posix_time = None
if last_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=last_visit_posix_time)
timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
else:
date_time = dfdatetime_semantic_time.NotSet()
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
event_data = GoogleAnalyticsEventData('utmb')
event_data.cookie_name = self.COOKIE_NAME
event_data.domain_hash = domain_hash
event_data.pages_viewed = number_of_pages_viewed
event_data.url = url
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
class GoogleAnalyticsUtmtPlugin(interface.BaseCookiePlugin):
"""A browser cookie plugin for __utmt Google Analytics cookies.
The structure of the cookie data:
<last time>
For example:
13113215173000000
"""
NAME = 'google_analytics_utmt'
DATA_FORMAT = 'Google Analytics __utmt cookie'
COOKIE_NAME = '__utmt'
def _ParseCookieData(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
"""Extracts events from cookie data.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set.
"""
fields = cookie_data.split('.')
number_of_fields = len(fields)
if number_of_fields != 1:
parser_mediator.ProduceExtractionWarning(
'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
number_of_fields, self.COOKIE_NAME))
return
try:
# TODO: fix that we're losing precision here use dfdatetime.
last_visit_posix_time = int(fields[0], 10) / 10000000
except ValueError:
last_visit_posix_time = None
if last_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=last_visit_posix_time)
timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
else:
date_time = dfdatetime_semantic_time.NotSet()
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
event_data = GoogleAnalyticsEventData('utmt')
event_data.cookie_name = self.COOKIE_NAME
event_data.url = url
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
class GoogleAnalyticsUtmzPlugin(interface.BaseCookiePlugin):
"""A browser cookie plugin for __utmz Google Analytics cookies.
The structure of the cookie data:
<domain hash>.<last time>.<sessions>.<sources>.<variables>
For example:
207318870.1383170190.1.1.utmcsr=google|utmccn=(organic)|utmcmd=organic|
utmctr=(not%20provided)
Or:
<last time>
For example:
13128990382000000
"""
NAME = 'google_analytics_utmz'
DATA_FORMAT = 'Google Analytics __utmz cookie'
COOKIE_NAME = '__utmz'
def _ParseCookieData(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
"""Extracts events from cookie data.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (str): cookie data.
url (str): URL or path where the cookie got set.
"""
fields = cookie_data.split('.')
number_of_fields = len(fields)
if number_of_fields > 5:
variables = '.'.join(fields[4:])
fields = fields[0:4]
fields.append(variables)
number_of_fields = len(fields)
if number_of_fields not in (1, 5):
parser_mediator.ProduceExtractionWarning(
'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
number_of_fields, self.COOKIE_NAME))
return
if number_of_fields == 1:
domain_hash = None
try:
# TODO: fix that we're losing precision here use dfdatetime.
last_visit_posix_time = int(fields[0], 10) / 10000000
except ValueError:
last_visit_posix_time = None
number_of_sessions = None
number_of_sources = None
extra_attributes = {}
elif number_of_fields == 5:
domain_hash = fields[0]
try:
last_visit_posix_time = int(fields[1], 10)
except ValueError:
last_visit_posix_time = None
try:
number_of_sessions = int(fields[2], 10)
except ValueError:
number_of_sessions = None
try:
number_of_sources = int(fields[3], 10)
except ValueError:
number_of_sources = None
extra_variables = fields[4].split('|')
extra_attributes = {}
for variable in extra_variables:
key, _, value = variable.partition('=')
extra_attributes[key] = urlparse.unquote(value)
if last_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=last_visit_posix_time)
timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
else:
date_time = dfdatetime_semantic_time.NotSet()
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
event_data = GoogleAnalyticsEventData('utmz')
event_data.cookie_name = self.COOKIE_NAME
event_data.domain_hash = domain_hash
event_data.sessions = number_of_sessions
event_data.sources = number_of_sources
event_data.url = url
for key, value in extra_attributes.items():
setattr(event_data, key, value)
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
manager.CookiePluginsManager.RegisterPlugins([
GoogleAnalyticsUtmaPlugin, GoogleAnalyticsUtmbPlugin,
GoogleAnalyticsUtmtPlugin, GoogleAnalyticsUtmzPlugin])
|
joachimmetz/plaso
|
plaso/parsers/cookie_plugins/ganalytics.py
|
Python
|
apache-2.0
| 12,786
|
[
"VisIt"
] |
9cf0df7fd4d2f7fc8acd0a2415068312aaaaf93c5b751f02e2e4cb258c9917e5
|
#!/usr/bin/python2.4
# encoding: utf-8
"""
ppi.py
High-level functions for interacting with the protein-protein interaction sections of the ddG database.
Classes:
BindingAffinityDDGInterface - an class used to interface with the database. Call get_interface to get a user API based on this class.
Created by Shane O'Connor 2015.
Copyright (c) 2015 __UCSF__. All rights reserved.
"""
import pprint
from io import BytesIO
import os
import sys
import copy
import json
import zipfile
import re
import random
import traceback
import StringIO
import gzip
import shutil
import sqlite3
import cPickle as pickle
import datetime
import time
import getpass
import numpy
from sqlalchemy import and_, or_, func
from klab import colortext
from klab.bio.pdb import PDB
from klab.bio.basics import ChainMutation, residue_type_1to3_map
from klab.fs.fsio import read_file, write_temp_file
from klab.benchmarking.analysis.ddg_binding_affinity_analysis import DBBenchmarkRun as BindingAffinityBenchmarkRun
from klab.bio.alignment import ScaffoldModelChainMapper, DecoyChainMapper
from klab.db.sqlalchemy_interface import row_to_dict, get_or_create_in_transaction, get_single_record_from_query
from klab.stats.misc import get_xy_dataset_statistics_pandas
import kddg.api.schema as dbmodel
from kddg.api.layers import *
from kddg.api.db import ddG, PartialDataException, SanityCheckException
from kddg.api.data import json_dumps
from kddg.api import settings
sys_settings = settings.load()
DeclarativeBase = dbmodel.DeclarativeBase
def get_interface(passwd, username = sys_settings.database.username, hostname = sys_settings.database.hostname, rosetta_scripts_path = None, rosetta_database_path = None, port = sys_settings.database.port):
'''This is the function that should be used to get a BindingAffinityDDGInterface object. It hides the private methods
from the user so that a more traditional object-oriented API is created.'''
return GenericUserInterface.generate(BindingAffinityDDGInterface, passwd = passwd, username = username, hostname = hostname, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, port = port)
def get_interface_with_config_file(host_config_name = sys_settings.database.host_config_name, rosetta_scripts_path = None, rosetta_database_path = None, get_interface_factory = get_interface, passed_port = None):
# Uses ~/.my.cnf to get authentication information
### Example .my.cnf (host_config_name will equal myserver):
### [clientmyserver]
### user=username
### password=notmyrealpass
### host=server.domain.com
my_cnf_path = os.path.expanduser(os.path.join('~', '.my.cnf'))
    if not os.path.isfile(my_cnf_path):
raise Exception("A .my.cnf file must exist at: " + my_cnf_path)
# These three variables must be set in a section of .my.cnf named host_config_name
user = None
password = None
host = None
port = None
with open(my_cnf_path, 'r') as f:
parsing_config_section = False
for line in f:
if line.strip() == '[client%s]' % host_config_name:
parsing_config_section = True
elif line.strip() == '':
parsing_config_section = False
elif parsing_config_section:
if '=' in line:
tokens = line.strip().split('=')
key, val = tokens[0], '='.join(tokens[1:]) # values may contain '=' signs
key, val = key.strip(), val.strip()
if key == 'user':
user = val
elif key == 'password':
password = val
elif key == 'host':
host = val
elif key == 'port':
port = int(val)
else:
parsing_config_section = False
port = passed_port or port or 3306
if not user or not password or not host:
raise Exception("Couldn't find host(%s), username(%s), or password in section %s in %s" % (host, user, host_config_name, my_cnf_path) )
return get_interface_factory(password, username = user, hostname = host, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, port = port)
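# Illustrative usage sketch (the host, username, and password below are the
# hypothetical values from the example .my.cnf shown above; nothing here runs at
# import time):
#
#     ppi_api = get_interface_with_config_file(host_config_name = 'myserver')
#     # or, supplying credentials directly:
#     ppi_api = get_interface('notmyrealpass', username = 'username', hostname = 'server.domain.com')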
class BindingAffinityDDGInterface(ddG):
    '''This is the internal API class; it should NOT be used directly to interface with the database. Use get_interface or get_interface_with_config_file to obtain a user-facing API instead.'''
def __init__(self, passwd = None, username = sys_settings.database.username, hostname = sys_settings.database.hostname, rosetta_scripts_path = None, rosetta_database_path = None, port = sys_settings.database.port, file_content_buffer_size = None):
super(BindingAffinityDDGInterface, self).__init__(passwd = passwd, username = username, hostname = hostname, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, port = port, file_content_buffer_size = file_content_buffer_size)
self.prediction_data_path = self.DDG_db.execute('SELECT Value FROM _DBCONSTANTS WHERE VariableName="PredictionPPIDataPath"')[0]['Value']
self.unfinished_prediction_ids_cache = {}
def get_prediction_ids_with_scores(self, prediction_set_id, score_method_id = None):
'''Returns a set of all prediction_ids that already have an associated score in prediction_set_id
'''
score_table = self._get_sqa_prediction_structure_scores_table()
prediction_table = self.PredictionTable
if score_method_id != None:
return set([r['ID'] for r in self.DDG_db.execute_select('''
SELECT DISTINCT PredictionPPI.ID FROM PredictionPPIStructureScore
INNER JOIN PredictionPPI
ON PredictionPPI.ID=PredictionPPIStructureScore.PredictionPPIID
WHERE PredictionPPI.PredictionSet=%s AND PredictionPPIStructureScore.ScoreMethodID=%s''', parameters=(prediction_set_id, score_method_id))])
else:
return set([r['ID'] for r in self.DDG_db.execute_select('''
SELECT DISTINCT PredictionPPI.ID FROM PredictionPPIStructureScore
INNER JOIN PredictionPPI
ON PredictionPPI.ID=PredictionPPIStructureScore.PredictionPPIID
WHERE PredictionPPI.PredictionSet=%s''', parameters=(prediction_set_id,))])
def get_prediction_ids_and_record_ids(self, prediction_set_id, data_set_id = 'ZEMu_10.1002/prot.24634'):
'''Returns a set of all prediction_ids and the record ids for the underlying data set
'''
# Old query (delete if reading this):
# SELECT PredictionPPI.ID, PredictionPPI.PredictionSet, PredictionPPI.PPMutagenesisID, PredictionPPI.UserPPDataSetExperimentID,
# PPIDataSetDDG.RecordNumber, PPIDataSetDDG.PublishedPDBFileID
# FROM PredictionPPI
# INNER JOIN PPIDataSetDDG ON PPIDataSetDDG.PPMutagenesisID=PredictionPPI.PPMutagenesisID
# WHERE PredictionPPI.PredictionSet=%s
# AND PPIDataSetDDG.DataSetID=%s
return self.DDG_db.execute_select('''
SELECT PredictionPPI.ID, PredictionPPI.PredictionSet, PredictionPPI.PPMutagenesisID, PredictionPPI.UserPPDataSetExperimentID, PPIDataSetDDG.RecordNumber, PPIDataSetDDG.PublishedPDBFileID
FROM PredictionPPI
INNER JOIN
(SELECT UserPPDataSetExperiment.ID AS UserPPDataSetExperimentID, PPComplexID, SetNumber
FROM UserPPDataSetExperiment
INNER JOIN UserPPDataSetExperimentTag ON UserPPDataSetExperiment.ID=UserPPDataSetExperimentTag.UserPPDataSetExperimentID
WHERE
UserPPDataSetExperimentTag.Tag = 'ZEMu') AS ZEMuUserDataSet
ON PredictionPPI.UserPPDataSetExperimentID=ZEMuUserDataSet.UserPPDataSetExperimentID
INNER JOIN PPIDataSetDDG
ON PPIDataSetDDG.PPMutagenesisID=PredictionPPI.PPMutagenesisID AND PPIDataSetDDG.PPComplexID = ZEMuUserDataSet.PPComplexID AND PPIDataSetDDG.SetNumber = ZEMuUserDataSet.SetNumber
WHERE
PredictionPPI.PredictionSet = %s AND
PPIDataSetDDG.DataSetID=%s AND
PPIDataSetDDG.PPComplexID = ZEMuUserDataSet.PPComplexID AND
PPIDataSetDDG.SetNumber = ZEMuUserDataSet.SetNumber AND
PPIDataSetDDG.RecordNumber NOT IN (929, 524, 468, 1027, 1026)
''', parameters=(prediction_set_id, data_set_id))
def get_unfinished_prediction_ids(self, prediction_set_id):
'''Returns a set of all prediction_ids that have Status != "done"
'''
if prediction_set_id in self.unfinished_prediction_ids_cache:
return self.unfinished_prediction_ids_cache[prediction_set_id]
else:
unfinished_ids = [r.ID for r in self.get_session().query(self.PredictionTable).filter(and_(self.PredictionTable.PredictionSet == prediction_set_id, self.PredictionTable.Status != 'done'))]
self.unfinished_prediction_ids_cache[prediction_set_id] = unfinished_ids
return unfinished_ids
def get_prediction_ids_without_scores(self, prediction_set_id, score_method_id = None):
all_prediction_ids = [x for x in self.get_prediction_ids(prediction_set_id)]
all_prediction_ids_set = set()
for prediction_id in all_prediction_ids:
all_prediction_ids_set.add( prediction_id )
scored_prediction_ids_set = self.get_prediction_ids_with_scores(prediction_set_id, score_method_id = score_method_id)
return [x for x in all_prediction_ids_set.difference(scored_prediction_ids_set)]
###########################################################################################
## Information layer
##
## This layer is for functions which extract data from the database.
###########################################################################################
#== Information API =======================================================================
@informational_pdb
def get_pdb_chains_for_prediction(self, prediction_id):
# look up the complex associated with the dataset record for the list of chains
raise Exception('This needs to be implemented.')
@informational_pdb
def get_chain_sets_for_mutatagenesis(self, mutagenesis_id, complex_id = None):
'''Gets a list of possibilities for the associated complex and calls get_chains_for_mutatagenesis on each.
e.g. returns {('1KI1', 0) : {'L' : ['A','B'], 'R' : ['C']}, ('12AB', 2) : {'L' : ['L','H'], 'R' : ['A']}, ...}
This function assumes that a complex structure is required i.e. that all chains in the PDB chain set are in the same PDB file.
        This is a useful method for listing the possible complexes to use in a prediction or to determine whether one
        may be missing and the database needs to be updated.'''
pp_mutagenesis = self.DDG_db.execute_select("SELECT * FROM PPMutagenesis WHERE ID=%s", parameters = (mutagenesis_id,))
# Sanity checks
assert(len(pp_mutagenesis) == 1)
if complex_id:
assert(pp_mutagenesis[0]['PPComplexID'] == complex_id)
else:
complex_id = pp_mutagenesis[0]['PPComplexID']
d = {}
for pdb_set in self.DDG_db.execute_select("SELECT * FROM PPIPDBSet WHERE PPComplexID=%s AND IsComplex=1", parameters = (complex_id,)):
pdb_set_number = pdb_set['SetNumber']
pdb_file_ids = self.DDG_db.execute_select("SELECT DISTINCT PDBFileID FROM PPIPDBPartnerChain WHERE PPComplexID=%s AND SetNumber=%s", parameters = (complex_id, pdb_set_number))
assert(len(pdb_file_ids) == 1)
pdb_file_id = pdb_file_ids[0]['PDBFileID']
d[(pdb_file_id, pdb_set_number)] = self.get_chains_for_mutatagenesis(mutagenesis_id, pdb_file_id, pdb_set_number)
return d
@informational_pdb
def get_chains_for_mutatagenesis(self, mutagenesis_id, pdb_file_id, pdb_set_number, complex_id = None, tsession = None):
'''Returns a dictionary mapping 'L' to the list of left chains and 'R' to the list of right chains.
This function assumes that a complex structure is required i.e. that all chains in the PDB chain set are in the same PDB file.
'''
tsession = tsession or self.get_session() # do not create a new session
pp_mutagenesis = None
for r in tsession.execute('''SELECT * FROM PPMutagenesis WHERE ID=:mutagenesis_id''', dict(mutagenesis_id = mutagenesis_id)):
assert(pp_mutagenesis == None)
pp_mutagenesis = r
# Sanity checks
if complex_id:
assert(pp_mutagenesis['PPComplexID'] == complex_id)
pdb_set = None
for r in tsession.execute('''SELECT * FROM PPIPDBSet WHERE PPComplexID=:complex_id AND SetNumber=:pdb_set_number''', dict(complex_id = complex_id, pdb_set_number = pdb_set_number)):
assert(pdb_set == None)
pdb_set = r
assert(pdb_set['IsComplex'] == 1) # complex structure check
else:
complex_id = pp_mutagenesis['PPComplexID']
pdb_file_id, complex_chains = self.get_bound_pdb_set_details(complex_id, pdb_set_number, pdb_file_id = pdb_file_id, tsession = tsession)
return complex_chains
def get_bound_pdb_set_details(self, complex_id, pdb_set_number, pdb_file_id = None, tsession = None):
'''Returns the pdb_id and complex partner definitions (left PDB chains, right PDB chains) for complexes where all chains share the same PDB structure.'''
tsession = tsession or self.get_session() # do not create a new session
assert(complex_id != None and pdb_set_number != None)
complex_chains = dict(L = [], R = [])
for c in tsession.execute('''SELECT * FROM PPIPDBPartnerChain WHERE PPComplexID=:complex_id AND SetNumber=:pdb_set_number ORDER BY ChainIndex''', dict(complex_id = complex_id, pdb_set_number = pdb_set_number)):
if pdb_file_id:
assert(c['PDBFileID'] == pdb_file_id) # complex structure check
else:
pdb_file_id = c['PDBFileID']
complex_chains[c['Side']].append(c['Chain'])
assert(complex_chains['L'] and complex_chains['R'])
assert(len(set(complex_chains['L']).intersection(set(complex_chains['R']))) == 0) # in one unbound case, the same chain appears twice on one side (2CLR_DE|1CD8_AA, may be an error since this was published as 1CD8_AB but 1CD8 has no chain B) but it seems reasonable to assume that a chain should only appear on one side
return pdb_file_id, complex_chains
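    # Example return value of get_bound_pdb_set_details, following the example shapes
    # in the docstrings above (the PDB ID and chain letters are hypothetical):
    #     ('1KI1', {'L': ['A', 'B'], 'R': ['C']})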
@informational_pdb
def get_pdb_mutations_for_mutagenesis(self, mutagenesis_id, pdb_file_id, set_number, complex_id = None):
'''Returns the PDB mutations for a mutagenesis experiment as well as the PDB residue information.'''
pdb_mutations = []
for pdb_mutation in self.DDG_db.execute_select('''
SELECT PPMutagenesisPDBMutation.*, PDBResidue.ResidueType,
PDBResidue.BFactorMean, PDBResidue.BFactorDeviation,
PDBResidue.ComplexExposure, PDBResidue.ComplexDSSP, PDBResidue.MonomericExposure, PDBResidue.MonomericDSSP
FROM
PPMutagenesisPDBMutation
INNER JOIN
PDBResidue ON PPMutagenesisPDBMutation.PDBFileID = PDBResidue.PDBFileID AND PPMutagenesisPDBMutation.Chain = PDBResidue.Chain AND PPMutagenesisPDBMutation.ResidueID = PDBResidue.ResidueID AND PPMutagenesisPDBMutation.WildTypeAA = PDBResidue.ResidueAA
WHERE PPMutagenesisID=%s AND PDBResidue.PDBFileID=%s AND SetNumber=%s ORDER BY Chain, ResidueID''', parameters=(mutagenesis_id, pdb_file_id, set_number)):
if complex_id:
assert(pdb_mutation['PPComplexID'] == complex_id)
pdb_mutations.append(pdb_mutation)
return pdb_mutations
@sanity_check
def find_pdb_files_involved_in_multiple_complexes(self):
known_exceptions = {
# These need to be checked - 1OYV only has 1 chain besides Subtilisin Carlsberg
'1OYV' : 2, # Subtilisin Carlsberg bound to: i) domain 1 of its inhibitor; and ii) domain 2 of its inhibitor.
'1QFW' : 2, # Human chorionic gonadotropin (chains A, B) bound to: i) Fv anti-alpha (chains L, H); and ii) Fv anti-beta (chain M, I).
}
d = {}
for r in self.DDG_db.execute_select('SELECT ID FROM PDBFile ORDER BY ID'):
pdb_id = r['ID']
complex_ids = self.search_complexes_by_pdb_id(pdb_id)
if pdb_id.upper() in known_exceptions:
assert(len(complex_ids) == known_exceptions[pdb_id])
else:
if len(complex_ids) > 1:
d[pdb_id] = {'complex_ids' : complex_ids, 'complexes' : {}}
for complex_id in complex_ids:
d[pdb_id]['complexes'][complex_id] = self.get_complex_details(complex_id)
if d:
raise SanityCheckException('Some PDB files are associated with multiple complexes:\n{0}'.format(pprint.pformat(d)))
@informational_complex
def search_complexes_by_pdb_id(self, pdb_id):
'''Returns the list of PPComplexIDs which are related to the PDB ID. Typically this list will be empty or have one
ID. In rarer cases, the same structure may be used as a structural basis for multiple complexes.'''
results = self.DDG_db_utf.execute_select('''
SELECT DISTINCT PPIPDBSet.PPComplexID FROM PPIPDBPartnerChain
INNER JOIN PPIPDBSet ON PPIPDBPartnerChain.PPComplexID=PPIPDBSet.PPComplexID AND PPIPDBPartnerChain.SetNumber=PPIPDBSet.SetNumber
WHERE PDBFileID=%s AND IsComplex=1
''', parameters=(pdb_id,))
return [r['PPComplexID'] for r in results]
@informational_job
def get_complex_details(self, complex_id):
results = self.DDG_db_utf.execute_select('SELECT * FROM PPComplex WHERE ID=%s', parameters=(complex_id, ))
if len(results) == 1:
return results[0]
return None
def _get_dataset_record_with_checks(self, dataset_experiment_id, dataset_id = None):
if dataset_id:
de = self.DDG_db_utf.execute_select('SELECT * FROM PPIDataSetDDG WHERE ID=%s AND DataSetID=%s', parameters=(dataset_experiment_id, dataset_id))
if len(de) != 1:
raise colortext.Exception('Dataset record #%d does not exist for/correspond to the dataset %s.' % (dataset_experiment_id, dataset_id))
else:
de = self.DDG_db_utf.execute_select('SELECT * FROM PPIDataSetDDG WHERE ID=%s', parameters=(dataset_experiment_id,))
if len(de) != 1:
raise colortext.Exception('Dataset record #%d does not exist.' % (dataset_experiment_id, ))
return de[0]
@informational_job
def get_job_details(self, prediction_id, include_files = True, truncate_content = None):
try:
prediction_record = self.get_session().query(self.PredictionTable).filter(self.PredictionTable.ID == prediction_id).one()
except Exception, e:
raise colortext.Exception('No details could be found for prediction #{0} in the database.\n{1}\n{2}'.format(prediction_id, str(e), traceback.format_exc()))
# mutfile_content = self.create_mutfile(prediction_id)
# Read the UserPPDataSetExperiment details
user_dataset_experiment_id = prediction_record.UserPPDataSetExperimentID
ude_details = self.get_user_dataset_experiment_details(user_dataset_experiment_id)
assert(ude_details['Mutagenesis']['PPMutagenesisID'] == prediction_record.PPMutagenesisID)
# Convert the record to dict
prediction_record = row_to_dict(prediction_record)
prediction_record['Files'] = {}
if include_files:
prediction_record['Files'] = self.get_job_files(prediction_id, truncate_content = truncate_content)
for k, v in ude_details.iteritems():
assert(k not in prediction_record)
prediction_record[k] = v
return prediction_record
@informational_job
def get_dataset_experiment_details(self, dataset_experiment_id, dataset_id = None):
de = self._get_dataset_record_with_checks(dataset_experiment_id, dataset_id = dataset_id)
PDBFileID = de['PDBFileID']
PPMutagenesisID = de['PPMutagenesisID']
ComplexID = self.DDG_db.execute_select('SELECT PPComplexID FROM PPMutagenesis WHERE ID=%s', parameters=(PPMutagenesisID,))[0]['PPComplexID']
SetNumber = None
# todo: this is a nasty hack due to the fact that we do not currently store the SetNumber and PPComplexID in the PPIDataSetDDG table. See ticket:1457.
pdb_sets = self.DDG_db.execute_select('SELECT * FROM PPIPDBSet WHERE PPComplexID=%s AND IsComplex=1', parameters=(ComplexID,))
if len(pdb_sets) > 1:
probable_sets = self.DDG_db.execute_select('SELECT DatabaseKey FROM PPIDatabaseComplex WHERE DatabaseName LIKE "%%SKEMPI%%" AND DatabaseKey LIKE "%%%s%%" AND PPComplexID=%s' % (PDBFileID, ComplexID))
assert(len(probable_sets) == 1)
match_pdb_chains = sorted(list(''.join(probable_sets[0]['DatabaseKey'].split('_')[1:])))
pdb_sets = {}
for set_record in self.DDG_db.execute_select('SELECT * FROM PPIPDBPartnerChain WHERE PPComplexID=%s AND PDBFileID=%s', parameters=(ComplexID, PDBFileID)):
pdb_sets[set_record['SetNumber']] = pdb_sets.get(set_record['SetNumber'], [])
pdb_sets[set_record['SetNumber']].append(set_record['Chain'])
pdb_sets[set_record['SetNumber']] = sorted(pdb_sets[set_record['SetNumber']])
hits = []
for k, v in pdb_sets.iteritems():
if v == match_pdb_chains:
hits.append(k)
            if len(hits) != 1:
                raise Exception('Error: could not determine a unique PDB set for dataset record #%d and PPMutagenesisID=%s.' % (dataset_experiment_id, PPMutagenesisID))
SetNumber = hits[0]
elif len(pdb_sets) == 0:
raise Exception('Error: no possible PDB sets for dataset record #%d and PPMutagenesisID=%s.' % (dataset_experiment_id, PPMutagenesisID))
else:
SetNumber = pdb_sets[0]['SetNumber']
pdb_mutations = self.get_pdb_mutations_for_mutagenesis(PPMutagenesisID, PDBFileID, SetNumber, complex_id = ComplexID)
d = dict(
_DataSetID = de['ID'],
RecordID = de['RecordNumber'],
PublishedDDG = de['PublishedDDG'],
PDBFileID = PDBFileID,
DerivedMutation = de['RecordIsDerivative'] == 1,
PossiblyBadRecord = de['PossibleError'] == 1,
Notes = [de['Remark'], de['CorrectionRemark']],
Mutagenesis = dict(
PPMutagenesisID = PPMutagenesisID,
),
Complex = self.get_complex_details(ComplexID),
Structure = dict(
PDBFileID = PDBFileID,
SetNumber = SetNumber,
Partners = self.get_chains_for_mutatagenesis(PPMutagenesisID, PDBFileID, SetNumber, complex_id = ComplexID),
),
PDBMutations = pdb_mutations,
)
if de['PublishedPDBFileID'] != PDBFileID:
d['Notes'].append("The PDB ID was changed by Shane O'Connor from %s to %s." % (de['PublishedPDBFileID'], PDBFileID))
d['Notes'] = '. '.join([x for x in d['Notes'] if x])
d['ExperimentalDDGs'] = self.get_ddg_values_for_dataset_record(dataset_experiment_id, dataset_id = dataset_id)
d['DDG'] = sum([((e.get('Positive') or {}).get('DDG', 0) - (e.get('Negative') or {}).get('DDG', 0)) for e in d['ExperimentalDDGs']])
# todo: add SCOPe class, Pfam domain
return d
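    # Note on the DDG value computed above: each entry of ExperimentalDDGs is a
    # {'Positive': ..., 'Negative': ...} pair and DDG is the sum of
    # (positive DDG - negative DDG) over those pairs. For example, a single pair
    # with a positive DDG of 1.2 and no negative component gives DDG = 1.2.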
def _get_ddg_values_for_dataset_record(self, dataset_experiment_id, dataset_id = None):
de = self._get_dataset_record_with_checks(dataset_experiment_id, dataset_id = dataset_id)
ddg_pairs = self.DDG_db.execute_select('SELECT PositiveDependentPPIDDGID, NegativeDependentPPIDDGID FROM PPIDataSetDDGSource WHERE PPIDataSetDDGID=%s', parameters=(dataset_experiment_id,))
assert(ddg_pairs)
ddgs = []
for ddg_pair in ddg_pairs:
paired_record = {'Positive' : None, 'Negative' : None}
if ddg_pair['PositiveDependentPPIDDGID']:
positive_record = self.DDG_db.execute_select('SELECT * FROM PPIDDG WHERE ID=%s', parameters=(ddg_pair['PositiveDependentPPIDDGID'],))[0]
paired_record['Positive'] = dict(
DDG = positive_record['DDG'],
LocationOfValueInPublication = positive_record['LocationOfValueInPublication'],
Publication = positive_record['Publication'],
Temperature = positive_record['Temperature'],
pH = positive_record['pH'],
)
if ddg_pair['NegativeDependentPPIDDGID']:
negative_record = self.DDG_db.execute_select('SELECT * FROM PPIDDG WHERE ID=%s', parameters=(ddg_pair['NegativeDependentPPIDDGID'],))[0]
paired_record['Negative'] = dict(
DDG = negative_record['DDG'],
LocationOfValueInPublication = negative_record['LocationOfValueInPublication'],
Publication = negative_record['Publication'],
Temperature = negative_record['Temperature'],
pH = negative_record['pH'],
)
ddgs.append(paired_record)
return ddgs
@informational_job
def get_user_dataset_experiment_details(self, user_dataset_experiment_id, user_dataset_id = None):
if user_dataset_id:
colortext.ppurple('PRE-SELECT')
ude = self.DDG_db.execute_select('SELECT * FROM UserPPDataSetExperiment WHERE ID=%s AND UserDataSetID=%s', parameters=(user_dataset_experiment_id, user_dataset_id))
colortext.ppurple('POST-SELECT')
if len(ude) != 1:
raise colortext.Exception('User dataset experiment %d does not exist for/correspond to the user dataset %s.' % (user_dataset_experiment_id, user_dataset_id))
else:
ude = self.DDG_db.execute_select('SELECT * FROM UserPPDataSetExperiment WHERE ID=%s', parameters=(user_dataset_experiment_id,))
if len(ude) != 1:
raise colortext.Exception('User dataset experiment %d does not exist.' % (user_dataset_experiment_id, ))
ude = ude[0]
user_dataset_id = ude['UserDataSetID']
assert(ude['IsComplex'] == 1)
pdb_mutations = self.get_pdb_mutations_for_mutagenesis(ude['PPMutagenesisID'], ude['PDBFileID'], ude['SetNumber'], complex_id = ude['PPComplexID'])
return dict(
Mutagenesis = dict(
PPMutagenesisID = ude['PPMutagenesisID'],
),
Complex = self.get_complex_details(ude['PPComplexID']),
Structure = dict(
PDBFileID = ude['PDBFileID'],
SetNumber = ude['SetNumber'],
Partners = self.get_chains_for_mutatagenesis(ude['PPMutagenesisID'], ude['PDBFileID'], ude['SetNumber'], complex_id = ude['PPComplexID']),
),
PDBMutations = pdb_mutations,
)
def _export_dataset(self, dataset_id):
'''Returns a dict containing the dataset information.'''
dataset_record = self.DDG_db.execute_select('SELECT * FROM DataSet WHERE ID=%s', parameters=(dataset_id,))
if not dataset_record:
raise Exception('Dataset %s does not exist in the database.' % dataset_id)
dataset_record = dataset_record[0]
if dataset_record['DatasetType'] != 'Binding affinity' and dataset_record['DatasetType'] != 'Protein stability and binding affinity':
            raise Exception('The dataset %s does not contain any binding affinity data.' % dataset_id)
# Read the UserPPDataSetExperiment details
data = []
ref_ids = set()
for dataset_ddg in self.DDG_db.execute_select('SELECT * FROM PPIDataSetDDG WHERE DataSetID=%s ORDER BY Section, RecordNumber', parameters=(dataset_id,)):
de_details = self.get_dataset_experiment_details(dataset_ddg['ID'], dataset_id)
for ddg_pair in de_details['ExperimentalDDGs']:
if ddg_pair['Positive']: ref_ids.add(ddg_pair['Positive']['Publication'])
if ddg_pair['Negative']: ref_ids.add(ddg_pair['Negative']['Publication'])
data.append(de_details)
references = {}
for ref_id in sorted(ref_ids):
references[ref_id] = self.get_publication(ref_id)
return dict(
Data = data,
References = references
)
@informational_job
def export_dataset_to_csv(self, dataset_id):
        '''Returns the dataset information in tabular, tab-delimited format.'''
dataset_set = self._export_dataset(dataset_id)['Data']
lines = ['\t'.join(['Record #', 'Mutagenesis #', 'Partner 1', 'Partner 2', 'PDB ID', 'Partner 1 chains', 'Partner 2 chains', 'Mutations', 'DDG', 'PublishedDDG', 'IsDerivedMutation'])]
for record in dataset_set:
line = '\t'.join([
str(record['RecordID']),
str(record['Mutagenesis']['PPMutagenesisID']),
record['Complex']['LShortName'],
record['Complex']['RShortName'],
record['PDBFileID'],
','.join(sorted(record['Structure']['Partners']['L'])),
','.join(sorted(record['Structure']['Partners']['R'])),
','.join(['%s:%s%s%s' % (m['Chain'], m['WildTypeAA'], m['ResidueID'], m['MutantAA']) for m in record['PDBMutations']]),
str(record['DDG']),
str(record['PublishedDDG']),
str(int(record['DerivedMutation'])),
])
lines.append(line)
return ('\n'.join(lines)).encode('utf8', 'replace')
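    # Each line produced above is one tab-separated record, e.g. (hypothetical values,
    # columns as in the header row): 1, 1234, <left partner>, <right partner>, 1KI1,
    # "A,B", "C", "A:L35A", 1.2, 1.2, 0.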
@informational_job
def get_predictions_experimental_details(self, prediction_id, userdatset_experiment_ids_to_subset_ddgs = None, include_files = False, reference_ids = set(), include_experimental_data = True):
details = self.get_job_details(prediction_id, include_files = include_files)
# Sanity checks and redundancy removal
PPMutagenesisID = details['PPMutagenesisID']
ComplexID = details['Complex']['ID']
chains = set([item for sublist in [v for k, v in details['Structure']['Partners'].iteritems()] for item in sublist])
PDBFileID = details['Structure']['PDBFileID']
SetNumber = details['Structure']['SetNumber']
for m in details['PDBMutations']:
assert(m['PPMutagenesisID'] == PPMutagenesisID)
del m['PPMutagenesisID']
assert(ComplexID == m['PPComplexID'])
del m['PPComplexID']
assert(PDBFileID == m['PDBFileID'])
del m['PDBFileID']
assert(SetNumber == m['SetNumber'])
del m['SetNumber']
assert(m['Chain'] in chains)
assert(details['Mutagenesis']['PPMutagenesisID'] == PPMutagenesisID)
del details['Mutagenesis']
# Add the DDG values for the related analysis sets
user_dataset_experiment_id = details['UserPPDataSetExperimentID']
if include_experimental_data:
userdatset_experiment_ids_to_subset_ddgs = userdatset_experiment_ids_to_subset_ddgs or self.get_experimental_ddgs_by_analysis_set(user_dataset_experiment_id, reference_ids = reference_ids)
assert('DDG' not in details)
details['DDG'] = userdatset_experiment_ids_to_subset_ddgs[user_dataset_experiment_id]
else:
details['DDG'] = None
return details
@informational_job
def get_experimental_ddgs_by_analysis_set(self, user_dataset_experiment_id = None, reference_ids = set()):
# Determine the set of analysis sets
userdatset_experiment_ids_to_subset_ddgs = {}
analysis_sets = [r['Subset'] for r in self.DDG_db.execute_select('SELECT DISTINCT Subset FROM UserPPAnalysisSet')]
# Query the database, restricting to one user_dataset_experiment_id if passed
parameters = None
qry = '''
SELECT UserPPAnalysisSet.*,
(IFNULL(PositiveDDG.DDG, 0) - IFNULL(NegativeDDG.DDG, 0)) AS ExperimentalDDG,
IF(ISNULL(NegativeDDG.DDG), 0, 1) AS DerivedMutation,
PositiveDDG.PPMutagenesisID, PositiveDDG.Publication AS PositiveDDGPublication, PositiveDDG.DDG as PositiveDDGValue,
NegativeDDG.PPMutagenesisID, NegativeDDG.Publication AS NegativeDDGPublication, NegativeDDG.DDG as NegativeDDGValue
FROM UserPPAnalysisSet
LEFT JOIN PPIDDG AS PositiveDDG ON PositiveDependentPPIDDGID=PositiveDDG.ID
LEFT JOIN PPIDDG AS NegativeDDG ON NegativeDependentPPIDDGID=NegativeDDG.ID'''
if user_dataset_experiment_id != None:
qry += ' WHERE UserPPAnalysisSet.UserPPDataSetExperimentID=%s'
parameters = (user_dataset_experiment_id,)
results = self.DDG_db.execute_select(qry, parameters)
# Return the mapping
for r in results:
if not userdatset_experiment_ids_to_subset_ddgs.get(r['UserPPDataSetExperimentID']):
d = dict.fromkeys(analysis_sets, None)
for analysis_set in analysis_sets:
d[analysis_set] = {}
userdatset_experiment_ids_to_subset_ddgs[r['UserPPDataSetExperimentID']] = d
userdatset_experiment_ids_to_subset_ddgs[r['UserPPDataSetExperimentID']][r['Subset']] = userdatset_experiment_ids_to_subset_ddgs[r['UserPPDataSetExperimentID']][r['Subset']] or dict(
Cases = set(),
DDGs = [],
IsDerivedValue = False,
MeanDDG = None
)
# Store the references IDs
reference = None
if r['PositiveDDGPublication'] and r['NegativeDDGPublication']:
reference = r['PositiveDDGPublication'] + ', ' + r['NegativeDDGPublication']
reference_ids.add(r['PositiveDDGPublication'])
reference_ids.add(r['NegativeDDGPublication'])
elif r['PositiveDDGPublication']:
reference = r['PositiveDDGPublication']
reference_ids.add(r['PositiveDDGPublication'])
elif r['NegativeDDGPublication']:
reference = r['NegativeDDGPublication']
reference_ids.add(r['NegativeDDGPublication'])
record_d = userdatset_experiment_ids_to_subset_ddgs[r['UserPPDataSetExperimentID']][r['Subset']]
record_d['Cases'].add((r['Subset'], r['Section'], r['RecordNumber']))
record_d['DDGs'].append({'Value' : r['ExperimentalDDG'], 'IsDerivedValue' : r['DerivedMutation'], 'Reference' : reference})
record_d['IsDerivedValue'] = record_d['IsDerivedValue'] or r['DerivedMutation']
# Calculate the mean of the DDG values
# Note: Based on experience, summing in Python over small lists can be faster than creating temporary numpy arrays due to the array creation overhead
for k, v in userdatset_experiment_ids_to_subset_ddgs.iteritems():
for subset, subset_ddgs in v.iteritems():
if subset_ddgs:
num_points = len(subset_ddgs['DDGs'])
if num_points > 1:
subset_ddgs['MeanDDG'] = sum([float(ddg['Value'])for ddg in subset_ddgs['DDGs']]) / float(num_points)
else:
# Avoid unnecessary garbage creation and division
subset_ddgs['MeanDDG'] = subset_ddgs['DDGs'][0]['Value']
return userdatset_experiment_ids_to_subset_ddgs
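    # Shape of the mapping returned above (values are hypothetical):
    #     {<UserPPDataSetExperimentID>: {
    #         '<analysis set name>': {
    #             'Cases': set([(<subset>, <section>, <record number>), ...]),
    #             'DDGs': [{'Value': 1.2, 'IsDerivedValue': 0, 'Reference': '<publication id(s)>'}, ...],
    #             'IsDerivedValue': 0,
    #             'MeanDDG': 1.2,
    #         }, ...}}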
@informational_job
def export_prediction_cases_to_json(self, prediction_set_id, retrieve_references = True):
print('This will probably break - I need to dump datetime.datetime objects to ISO strings.')
return json_dumps(self.get_prediction_set_case_details(prediction_set_id, retrieve_references = retrieve_references))
@informational_job
def export_prediction_cases_to_pickle(self, prediction_set_id, retrieve_references = True):
return pickle.dumps(self.get_prediction_set_case_details(prediction_set_id, retrieve_references = retrieve_references))
##### Public API: Rosetta-related functions
@job_input
def create_resfile(self, prediction_id):
raise Exception('This needs to be implemented.')
@job_input
def create_mutfile(self, prediction_id):
raise Exception('This needs to be implemented.')
###########################################################################################
## Prediction layer
##
## This part of the API is responsible for inserting prediction jobs in the database via
## the trickle-down proteomics paradigm.
###########################################################################################
#== Job creation/management API ===========================================================
#
# This part of the API is responsible for inserting prediction jobs in the database via the
# trickle-down proteomics paradigm.
# PredictionSet interface
@job_creator
def add_prediction_set(self, prediction_set_id, halted = True, priority = 5, batch_size = 40, allow_existing_prediction_set = False,
series_name = None, series_color = 'ff0000', series_alpha = 1.0, description = None):
return super(BindingAffinityDDGInterface, self).add_prediction_set(prediction_set_id, halted = halted, priority = priority, batch_size = batch_size, allow_existing_prediction_set = allow_existing_prediction_set, contains_protein_stability_predictions = False, contains_binding_affinity_predictions = True, series_name = series_name, series_color = series_color, series_alpha = series_alpha, description = description)
@job_creator
def add_development_protocol_command_lines(self, prediction_set_id, protocol_name, application, template_command_line, rosetta_script_file = None):
dev_protocol_id = self._get_dev_protocol_id(protocol_name)
if not dev_protocol_id:
dev_protocol_id = self._create_dev_protocol(protocol_name, application, template_command_line)
rosetta_script = None
if rosetta_script_file:
with open(rosetta_script_file, 'r') as f:
rosetta_script = f.read()
prediction_ids = self.get_prediction_ids(prediction_set_id)
# All functions within the next with block should use the same database cursor.
# The commands then function as parts of a transaction which is rolled back if errors occur within the block
# or else is committed.
file_content_id = None
tsession = self.get_session(new_session = True)
try:
for prediction_id in prediction_ids:
prediction_record = tsession.query(dbmodel.PredictionPPI).filter(dbmodel.PredictionPPI.ID == prediction_id)
prediction_record.DevelopmentProtocolID = dev_protocol_id
tsession.flush()
if rosetta_script:
# Add the Rosetta script to the database, not using cursor
file_content_id = self._add_prediction_file(tsession, prediction_id, rosetta_script, os.path.basename(rosetta_script_file), 'RosettaScript', 'RosettaScript', 'Input', rm_trailing_line_whitespace = True, forced_mime_type = 'text/xml', file_content_id = file_content_id)
tsession.commit()
tsession.close()
except Exception, e:
colortext.error('Failure: {0}.'.format(str(e)))
colortext.error(traceback.format_exc())
tsession.rollback()
tsession.close()
@job_creator
def add_job(self, tsession, prediction_set_id, protocol_id, pp_mutagenesis_id, pp_complex_id, pdb_file_id, pp_complex_pdb_set_number, extra_rosetta_command_flags = None, keep_all_lines = False, keep_hetatm_lines = False, input_files = {}, test_only = False, pdb_residues_to_rosetta_cache = None, suppress_warnings = False):
'''This function inserts a prediction into the database.
The parameters define:
- the prediction set id used to group this prediction with other predictions for analysis;
- the protocol to be used to run the prediction;
- the set of mutations and PDB complex associated with the mutagenesis experiment;
- whether HETATM lines are to be kept or not.
- additional Rosetta flags e.g. "-ignore_zero_occupancy false" used to determine the mapping from PDB to Rosetta numbering. These flags should correspond to those used in the protocol otherwise errors could occur.
        We strip the PDB based on the chains defined by the complex (subject to keep_all_lines and keep_hetatm_lines) and store the stripped PDB in the database.
Next, the mapping from Rosetta numbering to PDB numbering is determined and stored in the database.
Then, the appropriate input files e.g. resfiles or mutfiles are generated and stored in the database.
Finally, we add the prediction record and associate it with the generated files.'''
return self._add_job(tsession, prediction_set_id, protocol_id, pp_mutagenesis_id, pp_complex_id, pdb_file_id, pp_complex_pdb_set_number, extra_rosetta_command_flags = extra_rosetta_command_flags, keep_all_lines = keep_all_lines, keep_hetatm_lines = keep_hetatm_lines, input_files = input_files, test_only = test_only, pdb_residues_to_rosetta_cache = pdb_residues_to_rosetta_cache, suppress_warnings = suppress_warnings)
@job_creator
def add_job_by_user_dataset_record(self, prediction_set_id, user_dataset_name, user_dataset_experiment_id, protocol_id, extra_rosetta_command_flags = None, keep_all_lines = False, keep_hetatm_lines = False, input_files = {}, test_only = False, pdb_residues_to_rosetta_cache = None, suppress_warnings = False, tsession = None, allowed_user_datasets = None):
'''Add a prediction job based on a user dataset record. This is typically called during add_prediction_run rather than directly by the user.
user_dataset_name is implied by user_dataset_experiment_id but we include it for sanity checking errors in data-entry.
The extra_rosetta_command_flags variable is used to add additional flags e.g. "-ignore_zero_occupancy false". These should be added if they are used in the protocol.'''
new_session = False
if not tsession:
new_session = True
tsession = self.get_session(new_session = True)
if not allowed_user_datasets:
allowed_user_datasets = self.get_defined_user_datasets(tsession)
try:
user_dataset_id = allowed_user_datasets[user_dataset_name]['ID']
except:
raise colortext.Exception('The user dataset "%s" does not exist for this API.' % user_dataset_name)
udse_table = self._get_sqa_user_dataset_experiment_table()
ude = None
for r in tsession.execute('''SELECT * FROM UserPPDataSetExperiment WHERE ID=:udse AND UserDataSetID=:uds''', dict(udse = user_dataset_experiment_id, uds = user_dataset_id)):
assert(not ude)
ude = r
if not ude:
raise colortext.Exception('User dataset experiment {0} does not exist for/correspond to this user dataset.'.format(user_dataset_experiment_id))
prediction_id = self._add_job(tsession, prediction_set_id, protocol_id, ude.PPMutagenesisID, ude.PPComplexID, ude.PDBFileID, ude.SetNumber, extra_rosetta_command_flags = extra_rosetta_command_flags, user_dataset_experiment_id = user_dataset_experiment_id, keep_all_lines = keep_all_lines, keep_hetatm_lines = keep_hetatm_lines, input_files = input_files, test_only = test_only, pdb_residues_to_rosetta_cache = pdb_residues_to_rosetta_cache, suppress_warnings = suppress_warnings)
if new_session:
tsession.close()
return prediction_id
@job_creator
def merge_prediction_run(self, from_prediction_set_id, to_prediction_set_id, create_if_does_not_exist = True, series_color = 'ff0000', description = None):
# Start a new transaction
tsession = self.get_session(new_session = True)
try:
# Look up the source prediction set details
try:
from_prediction_set = self.get_session().query(dbmodel.PredictionSet).filter(dbmodel.PredictionSet.ID == from_prediction_set_id).one()
except Exception, e:
print(str(e))
print(traceback.format_exc())
raise Exception('Could not retrieve details for source PredictionSet "{0}".'.format(from_prediction_set_id))
# Look up or create the target prediction set details
try:
to_prediction_set_details = self.get_session().query(dbmodel.PredictionSet).filter(dbmodel.PredictionSet.ID == to_prediction_set_id).one()
except:
if create_if_does_not_exist:
prediction_set_dict = row_to_dict(from_prediction_set)
prediction_set_dict['ID'] = to_prediction_set_id
prediction_set_dict['EntryDate'] = datetime.datetime.now()
prediction_set_dict['Description'] = description or 'Clone of {0}'.format(from_prediction_set_id)
                    to_prediction_set_details = get_or_create_in_transaction(tsession, dbmodel.PredictionSet, prediction_set_dict)
else:
raise Exception('Could not retrieve details for target PredictionSet "{0}". To create a new PredictionSet, set create_if_does_not_exist to True.'.format(to_prediction_set_id))
# Create prediction records
num_predictions = len(from_prediction_set.ppi_predictions)
            colortext.message('Merging/cloning prediction set.')
c = 1
for prediction in from_prediction_set.ppi_predictions:
colortext.wyellow('{0}/{1}: Prediction #{2}\r'.format(c, num_predictions, str(prediction.ID).ljust(15)))
c += 1
# Add a prediction record if it does not already exist
new_prediction_id = None
if self.get_session().query(self.PredictionTable).filter(and_(
self.PredictionTable.PredictionSet == to_prediction_set_id,
self.PredictionTable.UserPPDataSetExperimentID == prediction.UserPPDataSetExperimentID,
self.PredictionTable.ProtocolID == prediction.ProtocolID)).count() > 0:
continue
else:
new_prediction = prediction.clone(to_prediction_set_id)
tsession.add(new_prediction)
tsession.flush()
new_prediction_id = new_prediction.ID
# Add the prediction file records. The underlying FileContent tables will already exist.
for prediction_file in prediction.files:
new_prediction_file = prediction_file.clone(new_prediction_id)
tsession.add(new_prediction_file)
tsession.flush()
print('\nSuccess.\n')
tsession.commit()
tsession.close()
except:
colortext.error('Failure.')
tsession.rollback()
tsession.close()
raise
@job_creator
def add_prediction_run(self, prediction_set_id, user_dataset_name, extra_rosetta_command_flags = None, protocol_id = None, tagged_subset = None, keep_all_lines = False, keep_hetatm_lines = False, input_files = {}, quiet = False, test_only = False, only_single_mutations = False, short_run = False, test_run_first = True, show_full_errors = False, suppress_warnings = False):
'''Adds all jobs corresponding to a user dataset e.g. add_prediction_run("my first run", "AllBindingAffinityData", tagged_subset = "ZEMu").
If keep_hetatm_lines is False then all HETATM records for the PDB prediction chains will be removed. Otherwise, they are kept.
input_files is a global parameter for the run which is generally empty. Any files added here will be associated to all predictions in the run.
The extra_rosetta_command_flags parameter e.g. "-ignore_zero_occupancy false" is used to determine the mapping
from PDB to Rosetta numbering. These flags should correspond to those used in the protocol otherwise errors could occur.
        Returns False if no predictions were added to the run; returns True if all predictions (and there were some) were added to the run.'''
# For test runs, this number of predictions will be created
short_run_limit = 100
# Create a new session
tsession = self.get_session(new_session = True)
try:
# Check preconditions
assert(not(input_files)) # todo: do something with input_files when we use that here - call self._add_file_content, associate the filenames with the FileContent IDs, and pass that dict to add_job which will create PredictionPPIFile records
assert(only_single_mutations == False) # todo: support this later? it may make more sense to just define new UserDataSets
allowed_user_datasets = self._add_prediction_run_preconditions(tsession, prediction_set_id, user_dataset_name, tagged_subset)
# Get the list of user dataset experiment records
user_dataset_experiments = self.get_user_dataset_experiments(tsession, user_dataset_name, tagged_subset = tagged_subset)
assert(set([u.IsComplex for u in user_dataset_experiments]) == set([1,]))
num_user_dataset_experiments = user_dataset_experiments.count()
if not user_dataset_experiments:
tsession.close()
return False
# Count the number of individual PDB files
pdb_file_ids = set([u.PDBFileID for u in user_dataset_experiments])
tagged_subset_str = ''
if not quiet:
if tagged_subset:
tagged_subset_str = 'subset "%s" of ' % tagged_subset
# Create a cache to speed up job insertion
pdb_residues_to_rosetta_cache = {}
t1 = time.time()
# Run one query over the PredictionSet
result_set = None
if protocol_id:
result_set = tsession.execute('''SELECT * FROM PredictionPPI WHERE PredictionSet=:prediction_set AND ProtocolID=:protocol_id''', dict(prediction_set = prediction_set_id, protocol_id = protocol_id))
else:
result_set = tsession.execute('''SELECT * FROM PredictionPPI WHERE PredictionSet=:prediction_set AND ProtocolID IS NULL''', dict(prediction_set = prediction_set_id))
existing_results = set()
for r in result_set:
existing_results.add(r['UserPPDataSetExperimentID'])
# Test all predictions before creating records
if test_only or test_run_first:
if not quiet:
colortext.message('Testing %d predictions spanning %d PDB files for %suser dataset "%s" using protocol %s.' % (num_user_dataset_experiments, len(pdb_file_ids), tagged_subset_str, user_dataset_name, str(protocol_id or 'N/A')))
# Progress counter setup
count, records_per_dot = 0, 50
showprogress = not(quiet) and num_user_dataset_experiments > 300
if showprogress: print("|" + ("*" * (int(num_user_dataset_experiments/records_per_dot)-2)) + "|")
for ude in user_dataset_experiments:
# If the mutagenesis already exists in the prediction set, do not test it again
if not(ude.ID in existing_results):
# Test the prediction setup
prediction_id = self.add_job_by_user_dataset_record(prediction_set_id, user_dataset_name, ude.ID, protocol_id, extra_rosetta_command_flags = extra_rosetta_command_flags, keep_all_lines = keep_all_lines, keep_hetatm_lines = keep_hetatm_lines, input_files = input_files, test_only = True, pdb_residues_to_rosetta_cache = pdb_residues_to_rosetta_cache, suppress_warnings = suppress_warnings, tsession = tsession, allowed_user_datasets = allowed_user_datasets)
# Progress counter
count += 1
if showprogress and count % records_per_dot == 0: colortext.write(".", "cyan", flush = True)
if short_run and count >= short_run_limit: break
if not quiet: print('')
t2 = time.time()
print('Time taken for dry run: {0}s.'.format(t2 - t1))
if test_only:
tsession.rollback()
tsession.close()
return
# Progress counter setup
failed_jobs = {}
if not quiet:
colortext.message('Adding %d predictions spanning %d PDB files for %suser dataset "%s" using protocol %s.' % (num_user_dataset_experiments, len(pdb_file_ids), tagged_subset_str, user_dataset_name, str(protocol_id or 'N/A')))
count, records_per_dot = 0, 50
showprogress = not(quiet) and num_user_dataset_experiments > 300
if showprogress: print("|" + ("*" * (int(num_user_dataset_experiments/records_per_dot)-2)) + "|")
t1 = time.time()
time_to_ignore = 0
# Add the individual predictions
for ude in user_dataset_experiments:
# If the mutagenesis already exists in the prediction set, do not add it again
if not(ude.ID in existing_results):
t3 = time.time()
try:
# Add the prediction
user_dataset_id = allowed_user_datasets[user_dataset_name]['ID']
prediction_id = self.add_job_by_user_dataset_record(prediction_set_id, user_dataset_name, ude.ID, protocol_id, extra_rosetta_command_flags = extra_rosetta_command_flags, keep_all_lines = keep_all_lines, keep_hetatm_lines = keep_hetatm_lines, input_files = input_files, test_only = False, pdb_residues_to_rosetta_cache = pdb_residues_to_rosetta_cache, suppress_warnings = suppress_warnings, tsession = tsession, allowed_user_datasets = allowed_user_datasets)
except Exception, e:
time_to_ignore += time.time() - t3
user_dataset_id = allowed_user_datasets[user_dataset_name]['ID']
ude_record = None
for r in tsession.execute('SELECT * FROM UserPPDataSetExperiment WHERE ID=:ude_id AND UserDataSetID=:uds_id', dict(ude_id = ude.ID, uds_id = user_dataset_id)):
assert(ude_record == None)
ude_record = r
assert(ude_record['ID'] == ude.ID)
colortext.error('Adding the prediction for UserPPDataSetExperimentID %(ID)d failed (%(PDBFileID)s).' % ude_record)
failed_jobs[ude_record['PDBFileID']] = failed_jobs.get(ude_record['PDBFileID'], 0)
failed_jobs[ude_record['PDBFileID']] += 1
if show_full_errors:
print(e)
print(traceback.format_exc())
# Progress counter
count += 1
if showprogress and count % records_per_dot == 0: colortext.write(".", "green", flush = True)
if short_run and count >= short_run_limit: break
t2 = time.time()
print('Time taken for actual run: {0}s.'.format(t2 - t1 - time_to_ignore))
if failed_jobs:
colortext.error('Some jobs failed to run:\n%s' % pprint.pformat(failed_jobs))
if not quiet: print('')
print('Success')
tsession.commit()
tsession.close()
return True
except Exception, e:
print(str(e))
print(traceback.format_exc())
tsession.rollback()
tsession.close()
raise
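    # Illustrative call (the prediction set name, dataset name, and tag are the ones
    # quoted in the docstring above; the protocol id is hypothetical):
    #     ppi_api.add_prediction_run('my first run', 'AllBindingAffinityData',
    #                                tagged_subset = 'ZEMu', protocol_id = 16,
    #                                extra_rosetta_command_flags = '-ignore_zero_occupancy false')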
def _create_pdb_residues_to_rosetta_cache_mp(self, pdb_residues_to_rosetta_cache, pdb_file_id, pdb_chains_to_keep, extra_rosetta_command_flags, keep_hetatm_lines):
# Retrieve the PDB file content, strip out the unused chains, and create a PDB object
raise Exception('Shane should finish this and add keep_all_lines')
        assert(type(pdb_residues_to_rosetta_cache) == None)  # use the manager dictproxy
pdb_file = self.DDG_db.execute_select("SELECT * FROM PDBFile WHERE ID=%s", parameters = (pdb_file_id,))
p = PDB(pdb_file[0]['Content'])
p.strip_to_chains(list(pdb_chains_to_keep))
if not keep_hetatm_lines:
p.strip_HETATMs()
stripped_p = PDB('\n'.join(p.lines))
stripped_p.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, self.rosetta_database_path, extra_command_flags = extra_rosetta_command_flags)
atom_to_rosetta_residue_map = stripped_p.get_atom_sequence_to_rosetta_json_map()
rosetta_to_atom_residue_map = stripped_p.get_rosetta_sequence_to_atom_json_map()
cache_key = (pdb_file_id, ''.join(sorted(pdb_chains_to_keep)), self.rosetta_scripts_path, self.rosetta_database_path, extra_rosetta_command_flags)
pdb_residues_to_rosetta_cache[cache_key] = dict(
stripped_p = stripped_p,
atom_to_rosetta_residue_map = atom_to_rosetta_residue_map,
rosetta_to_atom_residue_map = rosetta_to_atom_residue_map)
@job_creator
def add_prediction_run_mp(self, prediction_set_id, user_dataset_name, extra_rosetta_command_flags = None, protocol_id = None, tagged_subset = None, keep_hetatm_lines = False, input_files = {}, quiet = False, only_single_mutations = False, short_run = False, show_full_errors = False):
'''This is a multiprocessing version of add_prediction_run and should be used in favor of that function as it runs faster.
It takes advantage of parallelism at two points - creating the stripped PDB files and mutfiles for input and
inserting the jobs (MD5 is run multiple times for each job).
It was simple/quicker to write this as a 2-step method with a bottleneck in the middle i.e. it waits until all
stripped PDB files are generated before adding the jobs.
This could be made even more parallel by removing the bottleneck i.e. the process which strips the PDBs could
then call _add_job immediately rather than waiting for the other calls to _create_pdb_residues_to_rosetta_cache_mp
to complete.
'''
# Check preconditions
assert(keep_all_lines)
assert(suppress_warnings)
assert(tsession)
assert(not(input_files)) # todo: do something with input_files when we use that here - call self._add_file_content, associate the filenames with the FileContent IDs, and pass that dict to add_job which will create PredictionPPIFile records
assert(only_single_mutations == False) # todo: support this later? it may make more sense to just define new UserDataSets
self._add_prediction_run_preconditions(tsession, prediction_set_id, user_dataset_name, tagged_subset)
# Get the list of user dataset experiment records
user_dataset_experiments = self.get_user_dataset_experiments(tsession, user_dataset_name, tagged_subset = tagged_subset)
assert(set([u['IsComplex'] for u in user_dataset_experiments]) == set([1,]))
if not user_dataset_experiments:
return False
# Count the number of individual PDB files
pdb_file_ids = set([u['PDBFileID'] for u in user_dataset_experiments])
tagged_subset_str = ''
if not quiet:
if tagged_subset:
tagged_subset_str = 'subset "%s" of ' % tagged_subset
# Create a cache to speed up job insertion
#todo: start back here pdb_residues_to_rosetta_cache = manager dictproxy
# Create the stripped PDBs and residue maps in parallel using the multiprocessing module
#todo: write this function on Monday - get_user_dataset_pdb_partner_chains should return a set (<list of {'id' : pdb_file_id, 'L' : <list of chain ids>, , 'R' : <list of chain ids>} dicts>)
pdb_partner_chains = self.get_user_dataset_pdb_partner_chains(user_dataset_name, tagged_subset = tagged_subset)
#todo: start back here for ppc in pdb_partner_chains:
#todo: start back here apply_async self._create_pdb_residues_to_rosetta_cache_mp(pdb_residues_to_rosetta_cache, ppc['id'], set(ppc['L'] + ppc['R']), extra_rosetta_command_flags, keep_hetatm_lines)
#todo: start back here .join()
# Progress counter setup
failed_jobs = {}
if not quiet:
colortext.message('Adding %d predictions spanning %d PDB files for %suser dataset "%s" using protocol %s.' % (len(user_dataset_experiments), len(pdb_file_ids), tagged_subset_str, user_dataset_name, str(protocol_id or 'N/A')))
count, records_per_dot = 0, 50
showprogress = not(quiet) and len(user_dataset_experiments) > 300
if showprogress: print("|" + ("*" * (int(len(user_dataset_experiments)/records_per_dot)-2)) + "|")
# Add the individual predictions
for ude in user_dataset_experiments:
# If the mutagenesis already exists in the prediction set, do not add it again
if protocol_id:
existing_results = self.DDG_db.execute_select("SELECT * FROM PredictionPPI WHERE PredictionSet=%s AND UserPPDataSetExperimentID=%s AND ProtocolID=%s", parameters=(prediction_set_id, ude['ID'], protocol_id))
else:
existing_results = self.DDG_db.execute_select("SELECT * FROM PredictionPPI WHERE PredictionSet=%s AND UserPPDataSetExperimentID=%s AND ProtocolID IS NULL", parameters=(prediction_set_id, ude['ID']))
if len(existing_results) == 0:
# Add the prediction
try:
user_dataset_id = self.get_defined_user_datasets(tsession)[user_dataset_name]['ID']
prediction_id = self.add_job_by_user_dataset_record(prediction_set_id, user_dataset_name, ude['ID'], protocol_id, extra_rosetta_command_flags = extra_rosetta_command_flags, keep_all_lines = keep_all_lines, keep_hetatm_lines = keep_hetatm_lines, input_files = input_files, test_only = False, pdb_residues_to_rosetta_cache = pdb_residues_to_rosetta_cache, suppress_warnings = suppress_warnings)
except Exception, e:
user_dataset_id = self.get_defined_user_datasets(tsession)[user_dataset_name]['ID']
ude_record = self.DDG_db.execute_select('SELECT * FROM UserPPDataSetExperiment WHERE ID=%s AND UserDataSetID=%s', parameters=(ude['ID'], user_dataset_id))
ude_record = ude_record[0]
assert(ude_record['ID'] == ude['ID'])
colortext.error('Adding the prediction for UserPPDataSetExperimentID %(ID)d failed (%(PDBFileID)s).' % ude_record)
failed_jobs[ude_record['PDBFileID']] = failed_jobs.get(ude_record['PDBFileID'], 0)
failed_jobs[ude_record['PDBFileID']] += 1
if show_full_errors:
print(e)
print(traceback.format_exc())
# Progress counter
count += 1
if showprogress and count % records_per_dot == 0: colortext.write(".", "cyan", flush = True)
if short_run and count > 4: break
if failed_jobs:
colortext.error('Some jobs failed to run:\n%s' % pprint.pformat(failed_jobs))
if not quiet: print('')
return True
@job_creator
def clone_prediction_run(self, existing_prediction_set, new_prediction_set):
raise Exception('not implemented yet')
#assert(existing_prediction_set exists and has records)
#assert(new_prediction_set is empty)
#for each prediction record, add the record and all associated predictionfile records,
def _add_job(self, tsession, prediction_set_id, protocol_id, pp_mutagenesis_id, pp_complex_id, pdb_file_id, pp_complex_pdb_set_number, extra_rosetta_command_flags = None, user_dataset_experiment_id = None, keep_all_lines = False, keep_hetatm_lines = False, input_files = {}, test_only = False, pdb_residues_to_rosetta_cache = {}, suppress_warnings = False):
'''This is the internal function which adds a prediction job to the database. We distinguish it from add_job as
prediction jobs added using that function should have no associated user dataset experiment ID.
pdb_residues_to_rosetta_cache can be used to speed up job insertion. When the same PDB/chains combination is used again, this cache uses the old mapping rather than run RosettaScripts again.
The extra_rosetta_command_flags variable is used to add additional flags e.g. "-ignore_zero_occupancy false".
These are used to generate a mapping from PDB to Rosetta numbering so they should be set according to how they
are set in the protocol. In particular, include any flags which have an effect on what residues are present.
'-ignore_zero_occupancy false' and '-ignore_unrecognized_res' are typically used.
'''
# todo: do something with input_files when we use that here - see add_prediction_run
assert(not(input_files))
# Preliminaries
if not self.rosetta_scripts_path or not os.path.exists(self.rosetta_scripts_path):
raise Exception('The path "%s" to the RosettaScripts executable does not exist.' % self.rosetta_scripts_path)
cache_maps = False
if isinstance(pdb_residues_to_rosetta_cache, dict):
cache_maps = True
# Information for debugging
pp_complex = None
for r in tsession.execute('''SELECT * FROM PPComplex WHERE ID=:pp_complex_id''', dict(pp_complex_id = pp_complex_id)):
assert(pp_complex == None)
pp_complex = r
# Determine the list of PDB chains that will be kept
pdb_chains = self.get_chains_for_mutatagenesis(pp_mutagenesis_id, pdb_file_id, pp_complex_pdb_set_number, complex_id = pp_complex_id, tsession = tsession)
pdb_chains_to_keep = set(pdb_chains['L'] + pdb_chains['R'])
if self.rosetta_database_path:
cache_key = (pdb_file_id, ''.join(sorted(pdb_chains_to_keep)), self.rosetta_scripts_path, self.rosetta_database_path, extra_rosetta_command_flags)
else:
cache_key = (pdb_file_id, ''.join(sorted(pdb_chains_to_keep)), self.rosetta_scripts_path, extra_rosetta_command_flags)
if cache_maps and pdb_residues_to_rosetta_cache.get(cache_key):
stripped_p = pdb_residues_to_rosetta_cache[cache_key]['stripped_p']
else:
# Retrieve the PDB file content, strip out the unused chains, and create a PDB object
p = PDB(tsession.query(dbmodel.PDBFile).filter(dbmodel.PDBFile.ID == pdb_file_id).one().Content)
stripped_p = p
if not keep_all_lines:
p.strip_to_chains(list(pdb_chains_to_keep))
if not keep_hetatm_lines:
p.strip_HETATMs()
stripped_p = PDB('\n'.join(p.lines))
# Determine PDB chains to move
pdb_chains_to_move_str = ','.join(sorted(set(pdb_chains['R'])))
# Check for CSE and MSE
try:
if 'CSE' in stripped_p.residue_types:
raise Exception('This case contains a CSE residue which may (or may not) cause an issue.')
elif 'MSE' in stripped_p.residue_types:
raise Exception('This case contains an MSE residue which may (or may not) cause an issue.')
# It looks like MSE (and CSE?) may now be handled - https://www.rosettacommons.org/content/pdb-files-rosetta-format
except Exception, e:
if not suppress_warnings:
colortext.error('%s: %s, chains %s' % (str(e), stripped_p.pdb_id or pdb_file_id, str(pdb_chains_to_keep)))
# Assert that there are no empty sequences
assert(sorted(stripped_p.atom_sequences.keys()) == sorted(pdb_chains_to_keep))
for chain_id, sequence in stripped_p.atom_sequences.iteritems():
assert(len(sequence) > 0)
# Get the PDB mutations and check that they make sense in the context of the stripped PDB file
# Note: the schema assumes that at most one set of mutations can be specified per PDB file per complex per mutagenesis. We may want to relax that in future by adding the SetNumber to the PPMutagenesisPDBMutation table
complex_mutations = [m for m in tsession.execute('SELECT * FROM PPMutagenesisMutation WHERE PPMutagenesisID=:pp_mutagenesis_id', dict(pp_mutagenesis_id = pp_mutagenesis_id))]
pdb_complex_mutations = [m for m in tsession.execute('SELECT * FROM PPMutagenesisPDBMutation WHERE PPMutagenesisID=:pp_mutagenesis_id AND PPComplexID=:pp_complex_id AND PDBFileID=:pdb_file_id', dict(pp_mutagenesis_id = pp_mutagenesis_id, pp_complex_id = pp_complex_id, pdb_file_id = pdb_file_id))]
assert(len(complex_mutations) == len(pdb_complex_mutations))
mutations = [ChainMutation(m['WildTypeAA'], m['ResidueID'], m['MutantAA'], Chain = m['Chain']) for m in pdb_complex_mutations]
try:
stripped_p.validate_mutations(mutations)
except Exception, e:
colortext.error('%s: %s' % (str(e), str(mutations)))
#colortext.warning('PPMutagenesisID=%d, ComplexID=%d, PDBFileID=%s, SetNumber=%d, UserDatasetExperimentID=%d' % (pp_mutagenesis_id, pp_complex_id, pdb_file_id, pp_complex_pdb_set_number, user_dataset_experiment_id))
#colortext.warning('SKEMPI record: %s' % self.DDG_db.execute_select('SELECT * FROM PPMutagenesis WHERE ID=%s', parameters=(pp_mutagenesis_id,))[0]['SKEMPI_KEY'])
#colortext.warning('PDB chains to keep: %s' % str(pdb_chains_to_keep))
#colortext.warning('PPIPDBPartnerChain records: %s' % pprint.pformat(self.DDG_db.execute_select('SELECT PPIPDBPartnerChain.* FROM PPIPDBPartnerChain INNER JOIN PPIPDBSet ON PPIPDBSet.PPComplexID=PPIPDBPartnerChain.PPComplexID AND PPIPDBSet.SetNumber=PPIPDBPartnerChain.SetNumber WHERE PPIPDBPartnerChain.PPComplexID=%s AND IsComplex=1 ORDER BY PPIPDBPartnerChain.SetNumber, PPIPDBPartnerChain.ChainIndex', parameters=(pp_complex_id,))))
# Determine the mapping from the stripped PDB to Rosetta numbering
# Note: we assume that this stripped PDB will be the input to the Rosetta protocol and that the numbering generated here therefore matches the numbering the protocol will use
# Make JSON mappings
if cache_maps and pdb_residues_to_rosetta_cache.get(cache_key):
atom_to_rosetta_residue_map = pdb_residues_to_rosetta_cache[cache_key]['atom_to_rosetta_residue_map']
rosetta_to_atom_residue_map = pdb_residues_to_rosetta_cache[cache_key]['rosetta_to_atom_residue_map']
else:
if self.rosetta_database_path:
stripped_p.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, self.rosetta_database_path, extra_command_flags = extra_rosetta_command_flags)
else:
stripped_p.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, extra_command_flags = extra_rosetta_command_flags)
atom_to_rosetta_residue_map = stripped_p.get_atom_sequence_to_rosetta_json_map()
rosetta_to_atom_residue_map = stripped_p.get_rosetta_sequence_to_atom_json_map()
if cache_maps and (not pdb_residues_to_rosetta_cache.get(cache_key)):
pdb_residues_to_rosetta_cache[cache_key] = dict(
stripped_p = stripped_p,
atom_to_rosetta_residue_map = atom_to_rosetta_residue_map,
rosetta_to_atom_residue_map = rosetta_to_atom_residue_map)
# Assert that there are no empty sequences in the Rosetta-processed PDB file
total_num_residues = 0
d = json.loads(rosetta_to_atom_residue_map)
stripped_p_chains = stripped_p.atom_sequences.keys()
for chain_id in stripped_p_chains:
num_chain_residues = len([z for z in d.values() if z[0] == chain_id])
total_num_residues += num_chain_residues
assert(num_chain_residues > 0)
pdb_filename = '%s_%s.pdb' % (pdb_file_id, ''.join(sorted(pdb_chains_to_keep)))
# Create parameter substitution dictionary
mutfile_name = 'mutations.mutfile'
resfile_name = 'mutations.resfile'
parameter_sub_dict = {
'%%input_pdb%%' : pdb_filename,
'%%chainstomove%%' : pdb_chains_to_move_str,
'%%pathtoresfile%%' : resfile_name,
'%%pathtomutfile%%' : mutfile_name,
}
if test_only:
return
# All functions below use tsession which allows us to use transactions which can be rolled back if errors occur
if protocol_id:
existing_records = [r for r in tsession.execute('SELECT * FROM {0} WHERE PredictionSet=:prediction_set AND UserPPDataSetExperimentID=:user_dataset_experiment_id AND ProtocolID=:protocol_id'.format(self._get_prediction_table()), dict(prediction_set = prediction_set_id, user_dataset_experiment_id = user_dataset_experiment_id, protocol_id = protocol_id))]
else:
existing_records = [r for r in tsession.execute('SELECT * FROM {0} WHERE PredictionSet=:prediction_set AND UserPPDataSetExperimentID=:user_dataset_experiment_id AND ProtocolID IS NULL'.format(self._get_prediction_table()), dict(prediction_set = prediction_set_id, user_dataset_experiment_id = user_dataset_experiment_id))]
assert(len(existing_records) == 0)
prediction_record = dict(
PredictionSet = prediction_set_id,
PPMutagenesisID = pp_mutagenesis_id,
UserPPDataSetExperimentID = user_dataset_experiment_id,
ProtocolID = protocol_id,
JSONParameters = json_dumps(parameter_sub_dict),
DevelopmentProtocolID = None,
ExtraParameters = extra_rosetta_command_flags,
Status = 'queued',
Cost = total_num_residues,
KeptHETATMLines = keep_hetatm_lines,
)
prediction_ppi = get_or_create_in_transaction(tsession, self._get_sqa_prediction_table(), dict(
PredictionSet = prediction_set_id,
PPMutagenesisID = pp_mutagenesis_id,
UserPPDataSetExperimentID = user_dataset_experiment_id,
ProtocolID = protocol_id,
JSONParameters = json_dumps(parameter_sub_dict),
DevelopmentProtocolID = None,
ExtraParameters = extra_rosetta_command_flags,
Status = 'queued',
Cost = total_num_residues,
KeptHETATMLines = keep_hetatm_lines,
), missing_columns = ['ID', 'EntryDate', 'StartDate', 'EndDate', 'Errors', 'AdminCommand', 'maxvmem', 'DDGTime', 'NumberOfMeasurements'])
#sql, params, record_exists = self.DDG_db.create_insert_dict_string(self._get_prediction_table(), prediction_record, ['PredictionSet', 'UserPPDataSetExperimentID', 'ProtocolID'])
#cur.execute(sql, params)
#prediction_id = cur.lastrowid
prediction_id = prediction_ppi.ID
# Add the stripped PDB file
self._add_prediction_file(tsession, prediction_id, '\n'.join(stripped_p.lines), pdb_filename, 'PDB', 'StrippedPDB', 'Input', rm_trailing_line_whitespace = True, forced_mime_type = 'chemical/x-pdb')
# Make and add the mutfile
rosetta_mutations = stripped_p.map_pdb_residues_to_rosetta_residues(mutations)
self._add_mutfile_to_prediction(tsession, prediction_id, rosetta_mutations, mutfile_name)
# Make and add the resfile
self._add_resfile_to_prediction(tsession, prediction_id, mutations, resfile_name)
# Add the residue mappings
self._add_residue_map_json_to_prediction(tsession, prediction_id, rosetta_to_atom_residue_map, 'Rosetta residue->PDB residue map')
self._add_residue_map_json_to_prediction(tsession, prediction_id, atom_to_rosetta_residue_map, 'PDB residue->Rosetta residue map')
# Add the params files
self._add_ligand_params_files_to_prediction(tsession, prediction_id, pdb_file_id)
if protocol_id:
existing_records = [r for r in tsession.execute('SELECT * FROM {0} WHERE PredictionSet=:prediction_set_id AND UserPPDataSetExperimentID=:user_dataset_experiment_id AND ProtocolID=:protocol_id'.format(self._get_prediction_table()),
dict(prediction_set_id = prediction_set_id, user_dataset_experiment_id = user_dataset_experiment_id, protocol_id = protocol_id))]
else:
existing_records = [r for r in tsession.execute('SELECT * FROM {0} WHERE PredictionSet=:prediction_set_id AND UserPPDataSetExperimentID=:user_dataset_experiment_id AND ProtocolID IS NULL'.format(self._get_prediction_table()),
dict(prediction_set_id = prediction_set_id, user_dataset_experiment_id = user_dataset_experiment_id))]
assert(len(existing_records) == 1)
prediction_id = existing_records[0]['ID']
return prediction_id
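# Illustrative sketch (an editorial example, not part of the API): the flags described in the docstring above are
# passed through extra_rosetta_command_flags as a single string, and the template parameters substituted into the
# protocol script have the shape built above. All concrete values below are hypothetical.
#
#   extra_rosetta_command_flags = '-ignore_zero_occupancy false -ignore_unrecognized_res'
#   parameter_sub_dict = {
#       '%%input_pdb%%'     : '1XYZ_AB.pdb',        # stripped PDB written for the prediction
#       '%%chainstomove%%'  : 'B',                  # comma-separated right-partner chains
#       '%%pathtoresfile%%' : 'mutations.resfile',
#       '%%pathtomutfile%%' : 'mutations.mutfile',
#   }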
#== Job execution/completion API ===========================================================
#
# This part of the API is responsible for starting jobs and setting them as failed or
# completed
@job_execution
def set_job_temporary_protocol_field(self, prediction_id, prediction_set_id, temporary_protocol_field):
raise Exception('not implemented yet')
@job_execution
def start_job(self, prediction_id, prediction_set_id):
'''Sets the job status to "active". prediction_set must be passed and is used as a sanity check.'''
prediction_record = self.DDG_db.execute_select('SELECT * FROM PredictionPPI WHERE ID=%s AND PredictionSet=%s', parameters=(prediction_id, prediction_set_id))[0]
if prediction_record['ProtocolID'] == None:
print('empty Protocol')
if prediction_record['DevelopmentProtocolID'] == None:
raise Exception('Neither the Protocol nor the DevelopmentProtocolID is set for this job - it cannot be started without this information.')
raise Exception('This function needs to be implemented by subclasses of the API.')
@job_execution
def get_max_number_of_cluster_jobs(self, prediction_set_id, priority):
return self.DDG_db.execute_select('SELECT Value FROM _DBCONSTANTS WHERE VariableName="MaxStabilityClusterJobs"')[0]['Value']
@job_completion
def complete_job(self, prediction_id, prediction_set, scores, maxvmem, ddgtime, files = []):
'''Sets a job to 'completed' and stores scores. prediction_set must be passed and is used as a sanity check.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
###########################################################################################
## Analysis layer
##
## This part of the API is responsible for running analysis on completed predictions
###########################################################################################
@analysis_api
def determine_best_pairs(self, prediction_id, score_method_id = None, expectn = None, top_x = 3):
'''This returns the top_x lowest-scoring wildtype/mutants for a prediction given a scoring method.
The results are returned as a dict:
"wildtype" -> list(tuple(score, structure_id))
"mutant" -> list(tuple(score, structure_id))
If no scoring method is supplied then the first top_x structures (chosen arbitrarily) are returned (with scores set
to zero) as we have no method of scoring or discerning them.'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn)
if score_method_id != None:
assert(isinstance(top_x, int) and top_x > 0)
scores = scores.get(score_method_id)
mutant_complexes = []
wildtype_complexes = []
for structure_id, structure_scores in scores.iteritems():
if structure_scores.get('MutantComplex'):
mutant_complexes.append((structure_scores['MutantComplex']['total'], structure_id))
if structure_scores.get('WildTypeComplex'):
wildtype_complexes.append((structure_scores['WildTypeComplex']['total'], structure_id))
wildtype_complexes = sorted(wildtype_complexes)[:top_x]
mutant_complexes = sorted(mutant_complexes)[:top_x]
else:
wt_structure_ids = set()
mut_structure_ids = set()
for method_id, method_scores in scores.iteritems():
for structure_id, structure_scores in method_scores.iteritems():
if structure_scores.get('WildTypeComplex'):
wt_structure_ids.add(structure_id)
if structure_scores.get('MutantComplex'):
mut_structure_ids.add(structure_id)
wildtype_complexes = [(0, i) for i in sorted(wt_structure_ids)]
mutant_complexes = [(0, i) for i in sorted(mut_structure_ids)]
if top_x != None:
# If no score method is specified then we cannot choose the top X so we arbitrarily choose X structures
assert(isinstance(top_x, int) and top_x > 0)
wildtype_complexes = wildtype_complexes[:top_x]
mutant_complexes = mutant_complexes[:top_x]
# Truncate so that we have an equal number of both types
max_len = min(len(wildtype_complexes), len(mutant_complexes))
wildtype_complexes, mutant_complexes = wildtype_complexes[:max_len], mutant_complexes[:max_len]
if wildtype_complexes and mutant_complexes:
return {'wildtype' : wildtype_complexes, 'mutant' : mutant_complexes}
return {}
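# Minimal usage sketch for determine_best_pairs, assuming a configured interface instance named
# `ppi_api` and an existing prediction/score method (all values hypothetical):
#
#   best_pairs = ppi_api.determine_best_pairs(12345, score_method_id = 7, top_x = 3)
#   # e.g. {'wildtype': [(-950.2, 4), (-949.8, 1), (-948.1, 9)],
#   #       'mutant'  : [(-947.5, 2), (-946.9, 4), (-945.0, 7)]}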
@app_pymol
def create_pymol_session_in_memory(self, prediction_id, wt_task_number, mutant_task_number, pymol_executable = '/var/www/tg2/tg2env/designdb/pymol/pymol/pymol'):
# Retrieve and unzip results
archive = self.get_job_data(prediction_id)
zipped_content = zipfile.ZipFile(BytesIO(archive), 'r', zipfile.ZIP_DEFLATED)
try:
# Get the name of the files from the zip
wildtype_filename = 'repacked_wt_round_%d.pdb.gz' % wt_task_number
mutant_filename = None
for filepath in sorted(zipped_content.namelist()):
filename = os.path.split(filepath)[1]
if filename.startswith('mut_') and filename.endswith('_round_%d.pdb.gz' % mutant_task_number):
mutant_filename = filename
break
print(wildtype_filename, mutant_filename)
PyMOL_session = None
file_list = zipped_content.namelist()
print(file_list)
# If both files exist in the zip, extract their contents in memory and create a PyMOL session pair (PSE, script)
if (mutant_filename in file_list) and (wildtype_filename in file_list):
wildtype_pdb = zipped_content.open(wildtype_filename, 'r').read()
mutant_pdb = zipped_content.open(mutant_filename, 'r').read()
wildtype_pdb = read_file(write_temp_file('/tmp', wildtype_pdb, ftype = 'w', suffix = '.gz', prefix = ''))
mutant_pdb = read_file(write_temp_file('/tmp', mutant_pdb, ftype = 'w', suffix = '.gz', prefix = ''))
# todo: this should be structure_1_name = 'Wildtype', structure_2_name = 'Mutant' but the underlying PyMOL script needs to be parameterized
chain_mapper = ScaffoldModelChainMapper.from_file_contents(wildtype_pdb, mutant_pdb, structure_1_name = 'Scaffold', structure_2_name = 'Model')
PyMOL_session = chain_mapper.generate_pymol_session(pymol_executable = pymol_executable)
zipped_content.close()
return PyMOL_session
except Exception, e:
zipped_content.close()
raise Exception(str(e))
@app_pymol
def create_full_pymol_session_in_memory(self, prediction_id, score_method_id = None, top_x = 3, mutation_string = None, settings = {}, pymol_executable = '/var/www/tg2/tg2env/designdb/pymol/pymol/pymol', wt_chain_seed = None, mutant_chain_seed = None):
wt_chain_seed = wt_chain_seed or 'blue'
mutant_chain_seed = mutant_chain_seed or 'yellow'
best_pairs = self.determine_best_pairs(prediction_id, score_method_id = score_method_id, expectn = None, top_x = top_x)
# Retrieve and unzip results
archive = self.get_job_data(prediction_id)
zipped_content = zipfile.ZipFile(BytesIO(archive), 'r', zipfile.ZIP_DEFLATED)
try:
file_paths = {'wildtype' : {}, 'mutant' : {}}
# Get the name of the files from the zip
zip_filenames = set([os.path.split(filepath)[1] for filepath in zipped_content.namelist()])
# Retrieve the input structure
input_pdb_contents = None
try:
file_content_id = self.get_session().query(dbmodel.PredictionPPIFile).filter(and_(dbmodel.PredictionPPIFile.PredictionPPIID == prediction_id, dbmodel.PredictionPPIFile.FileRole == 'StrippedPDB')).one().FileContentID
input_pdb_contents = self.importer.get_file_content_from_cache(file_content_id)
except Exception, e:
# Report the error but continue
colortext.error(str(e))
colortext.error(traceback.format_exc())
# Find all wildtype structures
for p in best_pairs['wildtype']:
structure_id = p[1]
expected_filename = 'repacked_wt_round_{0}.pdb.gz'.format(structure_id)
if expected_filename in zip_filenames:
file_paths['wildtype'][structure_id] = expected_filename
# Find all mutant structures
mutant_ids = [p[1] for p in best_pairs['mutant']]
for filename in zip_filenames:
if filename.startswith('mut_'):
mtch = re.match('^mut_(.*?)_round_(\d+).pdb.*$', filename)
if mtch:
structure_id = int(mtch.group(2))
if structure_id in mutant_ids:
if not mutation_string:
mutation_string = mtch.group(1)
file_paths['mutant'][structure_id] = filename
PyMOL_session = None
file_list = zipped_content.namelist()
# Extract the selected wildtype and mutant structures from the zip in memory and add them to the chain mapper to create a PyMOL session pair (PSE, script)
chain_mapper = DecoyChainMapper()
for stypep in [('wildtype', 'wt', wt_chain_seed, 'white'), ('mutant', mutation_string or 'mutant', mutant_chain_seed, 'red')]:
stype = stypep[0]
prefix = stypep[1].replace(' ', '_')
for structure_id, filename in file_paths[stype].iteritems():
if filename in file_list:
if filename.endswith('.gz'):
pdb_stream = StringIO.StringIO(zipped_content.open(filename, 'r').read())
pdb_contents = gzip.GzipFile(fileobj=pdb_stream).read()
else:
pdb_contents = zipped_content.open(filename, 'r').read()
pdb_object = PDB(pdb_contents)
chain_mapper.add(pdb_object, '{0}_n{1}'.format(prefix, structure_id), chain_seed_color = stypep[2], backbone_color = stypep[2], sidechain_color = stypep[3])
if input_pdb_contents:
chain_mapper.add(PDB(input_pdb_contents), 'input', backbone_color = 'grey50', sidechain_color = 'grey50')
zipped_content.close()
PyMOL_session = chain_mapper.generate_pymol_session(settings = settings, pymol_executable = pymol_executable)
return PyMOL_session
except Exception, e:
zipped_content.close()
raise Exception('{0}\n{1}'.format(str(e), traceback.format_exc()))
def _get_prediction_data(self, prediction_id, score_method_id, main_ddg_analysis_type, expectn = None, extract_data_for_case_if_missing = False, root_directory = None, dataframe_type = "Binding affinity", prediction_data = {}):
assert( main_ddg_analysis_type.startswith('DDG_') )
analysis_type = main_ddg_analysis_type[4:]
top_x = 3
if analysis_type.startswith('Top'):
analysis_function = self.get_top_x_ddg
analysis_parameter = int( analysis_type[3:] )
top_x = analysis_parameter
elif analysis_type.startswith('Random'):
analysis_function = self.get_random_pairing_ddg
if len(analysis_type) > len('Random'):
analysis_parameter = int( analysis_type[len('Random'):] )
else:
analysis_parameter = None
elif analysis_type == 'AvgAllPairs':
analysis_function = self.get_avg_all_pairings_ddg
analysis_parameter = None
elif analysis_type == 'MatchPairs':
analysis_function = self.get_match_pairs_ddg
analysis_parameter = None
elif analysis_type.startswith('CplxBoltzWT'):
assert( len(analysis_type) > len('CplxBoltzWT') )
analysis_function = self.get_wt_complex_weighted_boltzmann_ddg
analysis_parameter = float( analysis_type[len('CplxBoltzWT'):] )
elif analysis_type.startswith('CplxBoltzMut'):
assert( len(analysis_type) > len('CplxBoltzMut') )
analysis_function = self.get_mut_complex_weighted_boltzmann_ddg
analysis_parameter = float( analysis_type[len('CplxBoltzMut'):] )
elif analysis_type.startswith('CplxBoltzBoth'):
assert( len(analysis_type) > len('CplxBoltzBoth') )
analysis_function = self.get_both_complex_weighted_boltzmann_ddg
analysis_parameter = float( analysis_type[len('CplxBoltzBoth'):] )
else:
raise Exception("Didn't recognize analysis type: " + str(main_ddg_analysis_type))
try:
predicted_ddg = analysis_function(prediction_id, score_method_id, analysis_parameter, expectn = expectn)
except Exception, e:
colortext.pcyan(str(e))
colortext.warning(traceback.format_exc())
if extract_data_for_case_if_missing:
self.extract_data_for_case(prediction_id, root_directory = root_directory, force = True, score_method_id = score_method_id)
try:
predicted_ddg = analysis_function(prediction_id, score_method_id, analysis_parameter, expectn = expectn)
except PartialDataException, e:
raise
except Exception, e:
raise
top_x_ddg_stability = self.get_top_x_ddg_stability(prediction_id, score_method_id, top_x = top_x, expectn = expectn)
prediction_data[main_ddg_analysis_type] = predicted_ddg
prediction_data['DDGStability_Top%d' % top_x] = top_x_ddg_stability
return prediction_data
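# For reference, the main_ddg_analysis_type strings dispatched above map to analysis functions as follows
# (the numeric parameter is parsed from the suffix of the string):
#   'DDG_Top3'             -> get_top_x_ddg with top_x = 3
#   'DDG_Random'           -> get_random_pairing_ddg over all structures
#   'DDG_Random10'         -> get_random_pairing_ddg with 10 sampled pairings
#   'DDG_AvgAllPairs'      -> get_avg_all_pairings_ddg
#   'DDG_MatchPairs'       -> get_match_pairs_ddg
#   'DDG_CplxBoltzWT0.5'   -> get_wt_complex_weighted_boltzmann_ddg with temperature 0.5
#   'DDG_CplxBoltzMut0.5'  -> get_mut_complex_weighted_boltzmann_ddg with temperature 0.5
#   'DDG_CplxBoltzBoth0.5' -> get_both_complex_weighted_boltzmann_ddg with temperature 0.5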
@analysis_api
def get_wt_complex_weighted_boltzmann_ddg(self, prediction_id, score_method_id, temperature, expectn = None):
return self.get_complex_weighted_boltzmann_ddg(prediction_id, score_method_id, temperature, expectn = expectn, scores_to_weight = 'wt_complex')
@analysis_api
def get_mut_complex_weighted_boltzmann_ddg(self, prediction_id, score_method_id, temperature, expectn = None):
return self.get_complex_weighted_boltzmann_ddg(prediction_id, score_method_id, temperature, expectn = expectn, scores_to_weight = 'mut_complex')
@analysis_api
def get_both_complex_weighted_boltzmann_ddg(self, prediction_id, score_method_id, temperature, expectn = None):
return self.get_complex_weighted_boltzmann_ddg(prediction_id, score_method_id, temperature, expectn = expectn, scores_to_weight = 'both_complexes')
@analysis_api
def get_complex_weighted_boltzmann_ddg(self, prediction_id, score_method_id, temperature, expectn = None, scores_to_weight = 'wt_complex'):
'''
Returns a Boltzmann-weighted DDG for this prediction: each matched wildtype/mutant structure pair is weighted by
exp(-score/temperature) of the chosen reference complex score (wildtype, mutant, or both; see scores_to_weight).
'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None:
return None
if self.scores_contains_ddg_score(scores):
raise Exception("This scoring analysis doesn't make sense to use without complex scores")
def boltz_exponent(x, t):
return numpy.exp( -1.0 * x / t )
try:
np_type = numpy.float64
struct_nums = scores.keys()
mut_complex = numpy.array( [np_type( scores[struct_num]['MutantComplex']['total'] ) for struct_num in struct_nums] )
mut_lpartner = numpy.array( [np_type( scores[struct_num]['MutantLPartner']['total'] ) for struct_num in struct_nums] )
mut_rpartner = numpy.array( [np_type( scores[struct_num]['MutantRPartner']['total'] ) for struct_num in struct_nums] )
wt_complex = numpy.array( [np_type( scores[struct_num]['WildTypeComplex']['total'] ) for struct_num in struct_nums] )
wt_lpartner = numpy.array( [np_type( scores[struct_num]['WildTypeLPartner']['total'] ) for struct_num in struct_nums] )
wt_rpartner = numpy.array( [np_type( scores[struct_num]['WildTypeRPartner']['total'] ) for struct_num in struct_nums] )
matched_ddgs = (mut_complex - mut_lpartner - mut_rpartner) - (wt_complex - wt_lpartner - wt_rpartner)
if scores_to_weight == 'wt_complex':
scores_for_weighting = wt_complex
elif scores_to_weight == 'mut_complex':
scores_for_weighting = mut_complex
elif scores_to_weight == 'both_complexes':
scores_for_weighting = mut_complex + wt_complex
else:
raise Exception('Unrecognized scores_to_weight argument: ' + str(scores_to_weight) )
max_scores_for_weighting = numpy.max(scores_for_weighting)
normalized_scores_for_weighting = scores_for_weighting - max_scores_for_weighting
exponented_scores = numpy.exp( np_type(-1.0) * normalized_scores_for_weighting / np_type(temperature) )
weighted_ddg = numpy.divide(
numpy.sum( numpy.multiply(matched_ddgs, exponented_scores) ),
numpy.sum( exponented_scores )
)
return weighted_ddg
except (KeyError, PartialDataException):
raise PartialDataException('The case is missing some data.')
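# The weighting implemented above is, for per-structure binding DDGs ddg_i and reference complex scores E_i
# (wildtype, mutant, or their sum):
#
#   weighted_DDG = sum_i( ddg_i * exp(-(E_i - max_j E_j) / T) ) / sum_i( exp(-(E_i - max_j E_j) / T) )
#
# The constant shift by max_j E_j cancels in the ratio. A self-contained numerical illustration with made-up
# scores (not real prediction data):
#
#   import numpy
#   matched_ddgs = numpy.array([-1.2, -0.4, 0.3])
#   wt_complex = numpy.array([-951.0, -949.5, -947.0])
#   T = 1.0
#   w = numpy.exp(-1.0 * (wt_complex - numpy.max(wt_complex)) / T)
#   weighted_ddg = numpy.sum(matched_ddgs * w) / numpy.sum(w)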
@analysis_api
def get_match_pairs_ddg(self, prediction_id, score_method_id, structs_to_use, expectn = None):
'''
Returns DDG for this prediction by averaging all values for paired output structures
'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None:
return None
if self.scores_contains_ddg_score(scores):
raise Exception("This scoring analysis doesn't make sense to use without complex scores")
try:
structs_to_use_score = numpy.average([
(scores[struct_num]['MutantComplex']['total'] - scores[struct_num]['MutantLPartner']['total'] - scores[struct_num]['MutantRPartner']['total']) -
(scores[struct_num]['WildTypeComplex']['total'] - scores[struct_num]['WildTypeLPartner']['total'] - scores[struct_num]['WildTypeRPartner']['total'])
for struct_num in scores
])
return structs_to_use_score
except (KeyError, PartialDataException):
raise PartialDataException('The case is missing some data.')
@analysis_api
def get_avg_all_pairings_ddg(self, prediction_id, score_method_id, structs_to_use, expectn = None):
'''
Returns DDG for this prediction by averaging together all possible pairings
'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None:
return None
if self.scores_contains_ddg_score(scores):
raise Exception("This scoring analysis doesn't make sense to use without complex scores")
try:
all_struct_num_pairs = []
for wt_struct_num in scores:
if 'WildTypeComplex' in scores[wt_struct_num]:
for mut_struct_num in scores:
if 'MutantComplex' in scores[mut_struct_num]:
all_struct_num_pairs.append( (wt_struct_num, mut_struct_num) )
structs_to_use_score = numpy.average([
(scores[mut_struct_num]['MutantComplex']['total'] - scores[mut_struct_num]['MutantLPartner']['total'] - scores[mut_struct_num]['MutantRPartner']['total']) -
(scores[wt_struct_num]['WildTypeComplex']['total'] - scores[wt_struct_num]['WildTypeLPartner']['total'] - scores[wt_struct_num]['WildTypeRPartner']['total'])
for wt_struct_num, mut_struct_num in all_struct_num_pairs
])
return structs_to_use_score
except (KeyError, PartialDataException):
raise PartialDataException('The case is missing some data.')
@analysis_api
def get_random_pairing_ddg(self, prediction_id, score_method_id, structs_to_use, expectn = None):
'''
Returns DDG for this prediction by randomly pairing mutant structures with wildtype structures
'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None:
return None
if self.scores_contains_ddg_score(scores):
try:
total_scores = [scores[struct_num]['DDG']['total'] for struct_num in scores]
if structs_to_use == None:
structs_to_use = len(total_scores)
structs_to_use_score = numpy.average(
random.sample(total_scores, structs_to_use)
)
return structs_to_use_score
except:
raise PartialDataException('The case is missing some data.')
try:
if structs_to_use == None:
structs_to_use = len(scores)
else:
structs_to_use = min(structs_to_use, len(scores))
structs_to_use_wt_struct_nums = random.sample(scores.keys(), structs_to_use)
structs_to_use_mut_struct_nums = random.sample(scores.keys(), structs_to_use)
structs_to_use_score = numpy.average([
(scores[mut_struct_num]['MutantComplex']['total'] - scores[mut_struct_num]['MutantLPartner']['total'] - scores[mut_struct_num]['MutantRPartner']['total']) -
(scores[wt_struct_num]['WildTypeComplex']['total'] - scores[wt_struct_num]['WildTypeLPartner']['total'] - scores[wt_struct_num]['WildTypeRPartner']['total'])
for wt_struct_num, mut_struct_num in zip(structs_to_use_wt_struct_nums, structs_to_use_mut_struct_nums)
])
return structs_to_use_score
except (KeyError, PartialDataException):
raise PartialDataException('The case is missing some data.')
@analysis_api
def get_top_x_ddg(self, prediction_id, score_method_id, top_x , expectn = None):
'''Returns the TopX value for the prediction. Typically, this is the mean value of the top X predictions for a
case computed using the associated Score records in the database.'''
# scores is a mapping from nstruct -> ScoreType -> score record where ScoreType is one of 'DDG', 'WildTypeLPartner', 'WildTypeRPartner', 'WildTypeComplex', 'MutantLPartner', 'MutantRPartner', 'MutantComplex'
# if we do the calculation in Python, pull scores out to the top level first
# otherwise, we can add a stored procedure to determine the TopX
# if we go the Python route, we can implement different variations on TopX (including a stored procedure) and pass the function pointers as an argument to the main analysis function
# Make sure that we have as many cases as we expect
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None:
return None
if self.scores_contains_ddg_score(scores):
try:
total_scores = [(scores[struct_num]['DDG']['total'], struct_num) for struct_num in scores]
total_scores.sort()
top_x_struct_nums = [t[1] for t in total_scores[:top_x]]
top_x_score = numpy.average([
scores[struct_num]['DDG']['total']
for struct_num in top_x_struct_nums
])
return top_x_score
except:
print scores[struct_num]
raise PartialDataException('The case is missing some data.')
try:
wt_total_scores = [(scores[struct_num]['WildTypeComplex']['total'], struct_num) for struct_num in scores]
wt_total_scores.sort()
top_x_wt_struct_nums = [t[1] for t in wt_total_scores[:top_x]]
mut_total_scores = [(scores[struct_num]['MutantComplex']['total'], struct_num) for struct_num in scores]
mut_total_scores.sort()
top_x_mut_struct_nums = [t[1] for t in mut_total_scores[:top_x]]
top_x_score = numpy.average([
(scores[mut_struct_num]['MutantComplex']['total'] - scores[mut_struct_num]['MutantLPartner']['total'] - scores[mut_struct_num]['MutantRPartner']['total']) -
(scores[wt_struct_num]['WildTypeComplex']['total'] - scores[wt_struct_num]['WildTypeLPartner']['total'] - scores[wt_struct_num]['WildTypeRPartner']['total'])
for wt_struct_num, mut_struct_num in zip(top_x_wt_struct_nums, top_x_mut_struct_nums)
])
return top_x_score
except:
raise PartialDataException('The case is missing some data.')
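# Illustrative sketch of the pairing above (hypothetical scores): with top_x = 2, wildtype complex totals
# {1: -950, 2: -948, 3: -945} and mutant complex totals {1: -947, 2: -949, 3: -944}, the top wildtype structures
# are (1, 2) and the top mutant structures are (2, 1); the reported value is the mean of the two interface DDGs
# computed from the zipped pairs (wt 1 with mut 2, wt 2 with mut 1).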
def scores_contains_ddg_score(self, scores):
for struct_num, score_dict in scores.iteritems():
if 'DDG' not in score_dict:
return False
return True
def scores_contains_complex_scores(self, scores):
for struct_num, score_dict in scores.iteritems():
if 'WildTypeComplex' not in score_dict or 'MutantComplex' not in score_dict:
return False
return True
@analysis_api
def get_top_x_ddg_stability(self, prediction_id, score_method_id, top_x = 3, expectn = None):
'''Returns the TopX value for the prediction considering only the complex scores. This may serve as a rough
measure of a stability DDG value.'''
scores = self.get_prediction_scores(prediction_id, expectn = expectn).get(score_method_id)
if scores == None or not self.scores_contains_complex_scores(scores):
return None
wt_total_scores = [(scores[struct_num]['WildTypeComplex']['total'], struct_num) for struct_num in scores]
wt_total_scores.sort()
top_x_wt_struct_nums = [t[1] for t in wt_total_scores[:top_x]]
mut_total_scores = [(scores[struct_num]['MutantComplex']['total'], struct_num) for struct_num in scores]
mut_total_scores.sort()
top_x_mut_struct_nums = [t[1] for t in mut_total_scores[:top_x]]
return numpy.average([scores[mut_struct_num]['MutantComplex']['total'] - scores[wt_struct_num]['WildTypeComplex']['total']
for wt_struct_num, mut_struct_num in zip(top_x_wt_struct_nums, top_x_mut_struct_nums)])
@analysis_api
def get_analysis_dataframe(self, prediction_set_id,
experimental_data_exists = True,
prediction_set_series_name = None, prediction_set_description = None, prediction_set_credit = None,
prediction_set_color = None, prediction_set_alpha = None,
use_existing_benchmark_data = True,
include_derived_mutations = False,
use_single_reported_value = False,
ddg_analysis_type = 'DDG_Top3',
take_lowest = None,
burial_cutoff = 0.25,
stability_classication_experimental_cutoff = 1.0,
stability_classication_predicted_cutoff = 1.0,
report_analysis = True,
silent = False,
root_directory = None, # where to find the prediction data on disk
score_method_id = None,
expectn = None,
allow_failures = False,
extract_data_for_case_if_missing = False,
debug = False,
restrict_to = set(),
remove_cases = set(),
):
#todo: rename function since we return BenchmarkRun objects
assert(score_method_id)
dataframe_type = 'Binding affinity'
parameters = copy.copy(locals())
del parameters['self']
return super(BindingAffinityDDGInterface, self)._get_analysis_dataframe(BindingAffinityBenchmarkRun, **parameters)
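# Minimal usage sketch, assuming a configured interface instance named `ppi_api` (the prediction set name and
# score method ID are hypothetical):
#
#   benchmark_run = ppi_api.get_analysis_dataframe('my prediction set',
#                                                  score_method_id = 7,
#                                                  ddg_analysis_type = 'DDG_Top3',
#                                                  use_existing_benchmark_data = True,
#                                                  expectn = 50)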
@analysis_api
def get_existing_analysis(self, prediction_set_id = None, analysis_dataframe_id = None, return_dataframe = True):
'''Returns a list of the summary statistics for any existing dataframes in the database.
Each item in the list is a dict corresponding to a dataframe. These dicts are structured as e.g.
{
'AnalysisDataFrameID': 185L,
'analysis_sets': ['SKEMPI', 'BeAtMuSiC', 'ZEMu'],
'analysis_type': 'DDG_Top3',
'analysis_type_description': '...',
'dataframe': <pandas dataframe>,
'scalar_adjustments': {
'BeAtMuSiC': 2.383437079488905,
'SKEMPI': 2.206268329703589,
'ZEMu': 2.2046199780552374
},
'stats': {
'BeAtMuSiC': {
'MAE': nan,
'fraction_correct': 0.7308900523560209,
'fraction_correct_fuzzy_linear': 0.74128683025321573,
'gamma_CC': 0.4047074501135616,
'ks_2samp': (0.24269480519480513, 2.9466866316296972e-32),
'kstestx': (nan, nan),
'kstesty': (nan, nan),
'normaltestx': (nan, nan),
'normaltesty': (nan, nan),
'pearsonr': (nan, 1.0),
'spearmanr': (0.41841534629950339, 2.1365219255798831e-53)
},
'SKEMPI': {...},
'ZEMu': {...},
}
}
'''
### KAB TODO: this function is not adjusted for new changes in top_x
if analysis_dataframe_id == None:
# Get a valid PredictionSet record if one exists
assert(prediction_set_id != None)
try:
prediction_set = self.get_session().query(dbmodel.PredictionSet).filter(and_(dbmodel.PredictionSet.ID == prediction_set_id, dbmodel.PredictionSet.BindingAffinity == 1)).one()
except:
return None
dataframes = self.get_session().query(dbmodel.AnalysisDataFrame).filter(and_(dbmodel.AnalysisDataFrame.PredictionSet == prediction_set_id, dbmodel.AnalysisDataFrame.DataFrameType == 'Binding affinity')).order_by(dbmodel.AnalysisDataFrame.ScoreMethodID, dbmodel.AnalysisDataFrame.TopX, dbmodel.AnalysisDataFrame.StabilityClassicationExperimentalCutoff, dbmodel.AnalysisDataFrame.StabilityClassicationPredictedCutoff)
else:
try:
dataframe = self.get_session().query(dbmodel.AnalysisDataFrame).filter(dbmodel.AnalysisDataFrame.ID == analysis_dataframe_id).one()
assert(dataframe.DataFrameType == 'Binding affinity')
dataframes = [dataframe]
except Exception, e:
colortext.error(str(e))
colortext.error(traceback.format_exc())
return None
analysis_results = []
dataframes = [dfr for dfr in dataframes]
for dfr in dataframes:
# The dict to return
dfi = dfr.get_dataframe_info()
dfi['stats'] = {}
# Compute the stats per analysis set
df = dfi['dataframe']
if dfi['analysis_sets']:
# Case where there are analysis sets
for analysis_set in dfi['analysis_sets']:
dfi['stats'][analysis_set] = get_xy_dataset_statistics_pandas(
df,
BindingAffinityBenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set),
BindingAffinityBenchmarkRun.get_analysis_set_fieldname('Predicted_adj', analysis_set),
fcorrect_x_cutoff = float(dfr.StabilityClassicationExperimentalCutoff),
fcorrect_y_cutoff = float(dfr.StabilityClassicationPredictedCutoff),
ignore_null_values = True)
elif 'Experimental' in df.columns:
# Case where there are no analysis sets
dfi['stats']['Global'] = get_xy_dataset_statistics_pandas(
df,
'Experimental',
'Predicted_adj',
fcorrect_x_cutoff = float(dfr.StabilityClassicationExperimentalCutoff),
fcorrect_y_cutoff = float(dfr.StabilityClassicationPredictedCutoff),
ignore_null_values = True)
else:
# Case where there are no experimental data
dfi['stats'] = None
if not return_dataframe:
# May be useful if we are keeping a lot of these in memory and the dataframe is not useful
dfi['dataframe'] = None
analysis_results.append(dfi)
return analysis_results
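# Minimal usage sketch (hypothetical prediction set name), iterating over the list of dicts described in the
# docstring above:
#
#   for dfi in ppi_api.get_existing_analysis(prediction_set_id = 'my prediction set') or []:
#       print dfi['AnalysisDataFrameID'], dfi['analysis_type'], sorted(dfi['stats'] or {})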
@analysis_api
def analyze(self, prediction_set_ids, score_method_ids,
experimental_data_exists = True,
analysis_set_ids = [],
prediction_set_series_names = {}, prediction_set_descriptions = {}, prediction_set_credits = {}, prediction_set_colors = {}, prediction_set_alphas = {},
use_published_data = False,
allow_failures = False,
use_existing_benchmark_data = True, recreate_graphs = False,
include_derived_mutations = False,
expectn = 50,
use_single_reported_value = False,
take_lowests = [],
ddg_analysis_types = [],
burial_cutoff = 0.25,
stability_classication_experimental_cutoff = 1.0,
stability_classication_predicted_cutoff = 1.0,
output_directory = None,
output_directory_root = None,
generate_plots = True,
generate_matplotlib_plots = False,
report_analysis = True,
silent = False,
root_directory = None, # where to find the prediction data on disk
debug = False,
restrict_to = set(),
remove_cases = set(),
call_analysis = True,
):
'''Runs the analyses for the specified PredictionSets and cross-analyzes the sets against each other if appropriate.
* Analysis setup arguments *
prediction_set_ids is a list of PredictionSet IDs. Each PredictionSet will be analyzed separately and appropriate
pairs will be cross-analyzed.
prediction_set_series_names, prediction_set_descriptions, and prediction_set_credits are mappings from PredictionSet IDs
to series names (in plots), descriptions, and credits respectively. These details are stored in the PredictionSet records
so the mappings are optional; they can be used to override the database values to customize the analysis
reports. Likewise, prediction_set_colors and prediction_set_alphas are mappings to series colors and transparency values
for use in the plots.
use_published_data. todo: implement later. This should include any published data e.g. the Kellogg et al. data for protein stability.
use_existing_benchmark_data and recreate_graphs are data creation arguments i.e. "should we use existing data or create it from scratch?"
include_derived_mutations is used to filter out dataset cases with derived mutations.
expectn declares how many predictions we expect to see per dataset case. If the actual number is less than expectn
then a warning will be included in the analysis.
* Dataframe arguments *
use_single_reported_value is specific to ddg_monomer. If this is True then the DDG value reported by the application is used and take_lowest is ignored. This is inadvisable - take_lowest = 3 is a better default.
take_lowests AKA Top_X. Specifies how many of the best-scoring groups of structures to consider when calculating the predicted DDG value; each value generates a corresponding 'DDG_TopX' analysis type.
ddg_analysis_types specifies analysis methods to run in addition to the TopX/take_lowests analyses. The options are not mutually exclusive.
burial_cutoff defines what should be considered buried (DSSPExposure field). Values around 1.0 are fully exposed, values of 0.0 are fully buried. For technical reasons, the DSSP value can exceed 1.0 but usually not by much.
stability_classication_experimental_cutoff AKA x_cutoff. This defines the neutral mutation range for experimental values in kcal/mol i.e. values between -1.0 and 1.0 kcal/mol are considered neutral by default.
stability_classication_predicted_cutoff AKA y_cutoff. This defines the neutral mutation range for predicted values in energy units.
* Reporting arguments *
output_directory : The directory in which to save plots and reports.
output_directory_root : A place to create an autogenerated output directory.
generate_plots : if plots are not needed, setting this to False can shorten the analysis time.
report_analysis : Whether or not to print analysis to stdout.
silent = False : Whether or not anything should be printed to stdout (True is useful for webserver interaction).
'''
for ddg_analysis_type in ddg_analysis_types:
assert( ddg_analysis_type.startswith('DDG_') )
for take_lowest in take_lowests:
assert(take_lowest > 0 and (int(take_lowest) == take_lowest))
ddg_analysis_types.append( 'DDG_Top%d' % take_lowest )
# Remove duplicate analysis types
ddg_analysis_types = set( ddg_analysis_types )
ddg_analysis_types = sorted( list(ddg_analysis_types) )
assert(0 <= burial_cutoff <= 2.0)
assert(stability_classication_experimental_cutoff > 0)
assert(stability_classication_predicted_cutoff > 0)
assert(expectn > 0 and (int(expectn) == expectn))
# Can't specify both output_directory and output_directory_root
if output_directory_root != None:
assert( output_directory == None )
if not os.path.isdir( output_directory_root ):
os.makedirs( output_directory_root )
if output_directory != None:
assert( output_directory_root == None )
benchmark_runs = []
for prediction_set_id in prediction_set_ids:
if len(prediction_set_ids) > 1:
print 'Generating benchmark run for prediction set: %s' % prediction_set_id
for score_method_id in score_method_ids:
if len(score_method_ids) > 1:
print 'Generating benchmark run for score method ID: %d' % score_method_id
for ddg_analysis_type in ddg_analysis_types:
if len(ddg_analysis_types) > 1:
print 'Generating benchmark run for DDG analysis type: %s' % ddg_analysis_type
benchmark_run = self.get_analysis_dataframe(prediction_set_id,
experimental_data_exists = experimental_data_exists,
prediction_set_series_name = prediction_set_series_names.get(prediction_set_id),
prediction_set_description = prediction_set_descriptions.get(prediction_set_id),
prediction_set_color = prediction_set_colors.get(prediction_set_id),
prediction_set_alpha = prediction_set_alphas.get(prediction_set_id),
prediction_set_credit = prediction_set_credits[prediction_set_id],
use_existing_benchmark_data = use_existing_benchmark_data,
include_derived_mutations = include_derived_mutations,
use_single_reported_value = use_single_reported_value,
ddg_analysis_type = ddg_analysis_type,
burial_cutoff = burial_cutoff,
stability_classication_experimental_cutoff = stability_classication_experimental_cutoff,
stability_classication_predicted_cutoff = stability_classication_predicted_cutoff,
report_analysis = report_analysis,
silent = silent,
root_directory = root_directory, # where to find the
score_method_id = score_method_id,
expectn = expectn,
allow_failures = allow_failures,
debug = debug,
restrict_to = restrict_to,
remove_cases = remove_cases,
)
# The keys of scalar_adjustments are the stored analysis sets
analysis_sets_to_run = benchmark_run.scalar_adjustments.keys()
if analysis_set_ids:
analysis_sets_to_run = set(analysis_sets_to_run).intersection(set(analysis_set_ids))
benchmark_runs.append(benchmark_run)
analysis_sets_to_run = sorted(analysis_sets_to_run)
if experimental_data_exists:
#todo: hack. this currently seems to expect all datapoints to be present. handle the case when we are missing data e.g. prediction set "ZEMu run 1"
analysis_sets_to_run = ['ZEMu'] # ['BeAtMuSiC', 'SKEMPI', 'ZEMu']
if call_analysis:
if len(benchmark_runs) == 1 and len(analysis_sets_to_run) == 1:
analysis_set_id = analysis_sets_to_run[0]
if output_directory_root:
# Create output directory inside output_directory_root
output_directory = os.path.join(output_directory_root, '%s-%s-%s_n-%d_topx-%d_score_method_%d-analysis_%s' % (time.strftime("%y%m%d"), getpass.getuser(), prediction_set_id, expectn, take_lowest, score_method_id, analysis_set_id))
colortext.message(analysis_set_id)
benchmark_run.full_analysis(analysis_set_id, output_directory)
else:
if output_directory or not output_directory_root:
raise Exception("Multiple benchmark run objects will be analyzed and output created; this requires setting output_directory_root instead of output_directory")
BindingAffinityBenchmarkRun.analyze_multiple(
benchmark_runs,
analysis_sets = analysis_sets_to_run,
analysis_directory = output_directory_root,
)
else:
return (benchmark_runs, analysis_sets_to_run)
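# Minimal usage sketch for analyze, assuming a configured interface instance named `ppi_api` (the prediction
# set name, score method ID and output path are hypothetical):
#
#   ppi_api.analyze(['my prediction set'], [7],
#                   take_lowests = [3],
#                   expectn = 50,
#                   output_directory_root = '/tmp/ddg_analysis')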
################################################################################################
## Private API layer
## These are helper functions used internally by the class but which are not intended for export
################################################################################################
###########################################################################################
## Subclass layer
##
## These functions need to be implemented by subclasses
###########################################################################################
# Concrete functions
def _get_sqa_prediction_table(self): return dbmodel.PredictionPPI
def _get_sqa_prediction_structure_scores_table(self): return dbmodel.PredictionPPIStructureScore
def _get_sqa_user_dataset_experiment_table(self): return dbmodel.UserPPDataSetExperiment
def _get_sqa_user_dataset_experiment_tag_table(self): return dbmodel.UserPPDataSetExperimentTag
def _get_sqa_user_dataset_experiment_tag_table_udsid(self): return dbmodel.UserPPDataSetExperimentTag.UserPPDataSetExperimentID
def _get_sqa_predictions_user_dataset_experiment_id(self, p): return p.UserPPDataSetExperimentID
def _get_sqa_prediction_type(self): return dbmodel.PredictionSet.BindingAffinity
prediction_table = 'PredictionPPI'
def _get_prediction_table(self): return self.prediction_table
prediction_structure_scores_table = 'PredictionPPIStructureScore'
def _get_prediction_structure_scores_table(self): return self.prediction_structure_scores_table
def _get_prediction_type(self): return 'BindingAffinity'
def _get_prediction_dataset_type(self): return 'Binding affinity'
def _get_prediction_type_description(self): return 'binding affinity'
def _get_user_dataset_experiment_table(self): return 'UserPPDataSetExperiment'
def _get_user_dataset_experiment_tag_table(self): return 'UserPPDataSetExperimentTag'
def _get_allowed_score_types(self): return set(['DDG', 'WildTypeLPartner', 'WildTypeRPartner', 'WildTypeComplex', 'MutantLPartner', 'MutantRPartner', 'MutantComplex'])
###########################################################################################
## Information layer
##
## This layer is for functions which extract data from the database.
###########################################################################################
#== Information API =======================================================================
@informational_job
def get_development_protocol(self, development_protocol_id):
results = self.DDG_db.execute_select('SELECT * FROM DevelopmentProtocol WHERE ID = %s', parameters=(development_protocol_id,) )
assert( len(results) == 1 )
return results[0]
@informational_pdb
def get_complex_ids_matching_protein_name(self, partial_name, tsession = None):
'''Returns a list of PPComplex IDs where at least one of the partner names matches partial_name.'''
tsession = self.importer.get_session()
tsession_utf = self.importer.get_session(utf = True)
results = []
partial_name_ascii = partial_name.encode('ascii', errors='ignore').decode('ascii') # ugh
if len(partial_name.split()) == 1 and len(partial_name) <= 4:
# SQL LIKE has no regex anchors, so prefix matches use 'name%' patterns and suffix matches use '%name' patterns
results += [c.ID for c in tsession_utf.query(dbmodel.PPComplex).filter(or_(
dbmodel.PPComplex.LName.like(partial_name + u'%'),
dbmodel.PPComplex.LShortName.like(partial_name + u'%'),
dbmodel.PPComplex.RName.like(partial_name + u'%'),
dbmodel.PPComplex.RShortName.like(partial_name + u'%')))]
results += [c.ID for c in tsession.query(dbmodel.PPComplex).filter(or_(
dbmodel.PPComplex.LHTMLName.like(partial_name_ascii + '%'),
dbmodel.PPComplex.RHTMLName.like(partial_name_ascii + '%')))]
results += [c.ID for c in tsession_utf.query(dbmodel.PPComplex).filter(or_(
dbmodel.PPComplex.LName.like(u'%' + partial_name),
dbmodel.PPComplex.LShortName.like(u'%' + partial_name),
dbmodel.PPComplex.RName.like(u'%' + partial_name),
dbmodel.PPComplex.RShortName.like(u'%' + partial_name)))]
results += [c.ID for c in tsession.query(dbmodel.PPComplex).filter(or_(
dbmodel.PPComplex.LHTMLName.like('%' + partial_name_ascii),
dbmodel.PPComplex.RHTMLName.like('%' + partial_name_ascii)))]
else:
results += [c.ID for c in tsession_utf.query(dbmodel.PPComplex).filter(or_(
dbmodel.PPComplex.LName.like(u'%' + partial_name + u'%'),
dbmodel.PPComplex.LShortName.like(u'%' + partial_name + u'%'),
dbmodel.PPComplex.RName.like(u'%' + partial_name + u'%'),
dbmodel.PPComplex.RShortName.like(u'%' + partial_name + u'%')))]
results += [c.ID for c in tsession.query(dbmodel.PPComplex).filter(or_(
dbmodel.PPComplex.LHTMLName.like('%' + partial_name_ascii + '%'),
dbmodel.PPComplex.RHTMLName.like('%' + partial_name_ascii + '%')))]
return results
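# Note: the raw-SQL implementation below is unreachable after the return statement above and appears to be
# retained only for reference.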
qry = '''SELECT ID FROM PPComplex
WHERE
LName LIKE %s
OR LShortName LIKE %s
OR LHTMLName LIKE %s
OR RName LIKE %s
OR RShortName LIKE %s
OR RHTMLName LIKE %s ORDER BY ID'''
if len(partial_name.split()) == 1 and len(partial_name) <= 4:
# for short names, we require that any matches have the string as a prefix or suffix as otherwise we may get many matches e.g. 'RAN' matches 'transferase', 'membrane', etc.
partial_name_ascii = partial_name.encode('ascii', errors='ignore').decode('ascii') # ugh
results += self.DDG_db_utf.execute_select(qry, parameters=(u'%{0}'.format(partial_name), u'%{0}'.format(partial_name), '%{0}'.format(partial_name_ascii), u'%{0}'.format(partial_name), u'%{0}'.format(partial_name), '%{0}'.format(partial_name_ascii)))
results += self.DDG_db_utf.execute_select(qry, parameters=(u'{0}%'.format(partial_name), u'{0}%'.format(partial_name), '{0}%'.format(partial_name_ascii), u'{0}%'.format(partial_name), u'{0}%'.format(partial_name), '{0}%'.format(partial_name_ascii)))
else:
partial_name_ascii = partial_name.encode('ascii', errors='ignore').decode('ascii') # ugh
results += self.DDG_db_utf.execute_select(qry, parameters=(u'%{0}%'.format(partial_name), u'%{0}%'.format(partial_name), '%{0}%'.format(partial_name_ascii), u'%{0}%'.format(partial_name), u'%{0}%'.format(partial_name), '%{0}%'.format(partial_name_ascii)))
return [r['ID'] for r in results]
@informational_pdb
def _get_pdb_chains_used_for_prediction_set(self, prediction_set):
raise Exception('not implemented yet')
return self.DDG_db.execute_select('''
SELECT Prediction.ID, Experiment.PDBFileID, Chain
FROM Prediction
INNER JOIN Experiment ON Experiment.ID=Prediction.ExperimentID
INNER JOIN ExperimentChain ON ExperimentChain.ExperimentID=Prediction.ExperimentID
WHERE PredictionSet=%s''', parameters=(prediction_set,))
###########################################################################################
## Prediction layer
##
## This part of the API is responsible for inserting prediction jobs in the database via
## the trickle-down proteomics paradigm.
###########################################################################################
#== Job creation API ===========================================================
#
# This part of the API is responsible for inserting prediction jobs in the database via
# the trickle-down proteomics paradigm.
def _charge_prediction_set_by_residue_count(self, PredictionSet):
'''This function assigns a cost for a prediction equal to the number of residues in the chains.'''
raise Exception('This function needs to be rewritten.')
from klab.bio.rcsb import parseFASTAs
DDG_db = self.DDG_db
predictions = DDG_db.execute_select("SELECT ID, ExperimentID FROM Prediction WHERE PredictionSet=%s", parameters=(PredictionSet,))
PDB_chain_lengths ={}
for prediction in predictions:
chain_records = DDG_db.execute_select('SELECT PDBFileID, Chain FROM Experiment INNER JOIN ExperimentChain ON ExperimentID=Experiment.ID WHERE ExperimentID=%s', parameters=(prediction['ExperimentID'],))
num_residues = 0
for chain_record in chain_records:
key = (chain_record['PDBFileID'], chain_record['Chain'])
if PDB_chain_lengths.get(key) == None:
fasta = DDG_db.execute_select("SELECT FASTA FROM PDBFile WHERE ID=%s", parameters = (chain_record['PDBFileID'],))
assert(len(fasta) == 1)
fasta = fasta[0]['FASTA']
f = parseFASTAs(fasta)
PDB_chain_lengths[key] = len(f[chain_record['PDBFileID']][chain_record['Chain']])
chain_length = PDB_chain_lengths[key]
num_residues += chain_length
print("UPDATE Prediction SET Cost=%0.2f WHERE ID=%d" % (num_residues, prediction['ID']))
predictions = DDG_db.execute("UPDATE Prediction SET Cost=%s WHERE ID=%s", parameters=(num_residues, prediction['ID'],))
def _get_dev_protocol_id(self, name):
dev_protocol_ids = self.DDG_db.execute_select("SELECT ID FROM DevelopmentProtocol WHERE Name=%s", parameters = (name,))
if len(dev_protocol_ids) == 0:
return None
elif len(dev_protocol_ids) == 1:
return int(dev_protocol_ids[0]['ID'])
else:
raise Exception("DevelopmentProtocol table was originally set up so that names are unique; this has obviously changed")
def _create_dev_protocol(self, name, application, template_command_line):
dev_prot_record = {
'Name' : name,
'Application' : application,
'TemplateCommandLine' : template_command_line,
}
sql, params, record_exists = self.DDG_db.create_insert_dict_string('DevelopmentProtocol', dev_prot_record)
self.DDG_db.execute(sql, params)
###########################################################################################
## Data entry layer
##
## This part of the API is responsible for data entry (e.g. complex definitions)
###########################################################################################
#== Job creation API ===========================================================
#
# This part of the API is responsible for inserting prediction jobs in the database via
# the trickle-down proteomics paradigm.
#######################################
# #
# Protein-protein complex data entry #
# public API #
# #
# #
# PPComplex #
# PPIPDBPartnerChain #
# PPIPDBSet #
# #
# Missing tables: #
# PPIConformationalChange #
# PPIDatabaseComplex #
# PPIDataSetCrossmap #
# #
#######################################
@ppi_data_entry
def find_complex(self, pdb_ids, keywords = [], tsession = None, quiet = True):
possible_match_ids = []
for pdb_id in pdb_ids:
existing_records = self.DDG_db.execute_select('SELECT * FROM PDBFile WHERE ID=%s', parameters=(pdb_id,))
if existing_records and not quiet:
colortext.warning('The PDB file {0} exists in the database.'.format(pdb_id))
complex_ids = self.search_complexes_by_pdb_id(pdb_id)
if complex_ids:
if existing_records and not quiet:
colortext.warning('The PDB file {0} has associated complexes: {1}'.format(pdb_id, ', '.join(map(str, complex_ids))))
assert(len(complex_ids) == 1)
complex_id = complex_ids[0]
#colortext.warning('Complex #{0}'.format(complex_id))
#pprint.pprint(self.get_complex_details(complex_id))
assert(type(keywords) == list)
keywords = set(keywords)
for keyword in keywords:
hits = self.get_complex_ids_matching_protein_name(keyword, tsession = tsession)
if hits:
if not quiet:
colortext.warning('Partial match on "{0}".'.format(keyword))
possible_match_ids.extend(hits)
possible_match_ids = sorted(set(possible_match_ids))
return [self.get_complex_details(id) for id in possible_match_ids]
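# Minimal usage sketch (hypothetical PDB ID and keyword), returning complex detail dicts for any partial
# name matches:
#
#   possible_matches = ppi_api.find_complex(['1XYZ'], keywords = ['ubiquitin'])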
@ppi_data_entry
def add_complex_structure_pair(self, complex_structure_definition_pair, keywords = None, force = False, previously_added = set(), trust_database_content = False, update_sections = set(), allow_missing_params_files = False, debug = False, minimum_sequence_identity = 95.0):
'''Wrapper function for add_designed_pdb and add_complex.
complex_structure_definition_pair should be a dict with the structure:
dict(
Structure = <see the definition in kddg.api.data:add_designed_pdb>,
Complex = <see the definition in ppi_api:add_complex>,
)
To simplify the logic, we treat this function call as an atomic call i.e. it creates its own session and rolls back or commits.
'''
# Sanity checks
assert(complex_structure_definition_pair['Complex']['structure_id'] == complex_structure_definition_pair['Structure']['db_id'])
if 'chain_mapping' in complex_structure_definition_pair['Structure']:
assert(sorted(complex_structure_definition_pair['Structure']['chain_mapping'].keys()) == sorted(complex_structure_definition_pair['Complex']['LChains'] + complex_structure_definition_pair['Complex']['RChains']))
# Create a new session
tsession = self.importer.get_session(new_session = True, utf = False)
try:
# Add the structure
self.importer.add_designed_pdb(complex_structure_definition_pair['Structure'], previously_added = previously_added, trust_database_content = trust_database_content,
update_sections = update_sections, allow_missing_params_files = allow_missing_params_files,
minimum_sequence_identity = minimum_sequence_identity, tsession = tsession, debug = debug)
if debug:
tsession.rollback()
else:
tsession.commit()
tsession.close()
except:
colortext.error('Failure.')
tsession.rollback()
tsession.close()
raise
tsession = self.importer.get_session(new_session = True, utf = True)
try:
# Add the complex definition and PDB definition
api_response = self.add_complex(complex_structure_definition_pair['Complex'], keywords = keywords, force = force, debug = debug, tsession = tsession)
if api_response['success']:
str(api_response['PPIPDBSet']) # this forced lookup of partner_chains seems to be crucial when accessing it later (which should only be done for printing as the data cannot be guaranteed to be up-to-date)
tsession.expunge_all() # note: we only need to expunge api_response['PPIPDBSet'].partner_chains (it is loaded lazily/deferred)
if debug:
api_response = dict(success = False, error = 'Debug call - rolling back the transaction.')
tsession.rollback()
else:
tsession.commit()
else:
tsession.rollback()
tsession.close()
return api_response
except:
colortext.error('Failure.')
tsession.rollback()
tsession.close()
raise
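# Sketch of the expected input shape (all concrete values hypothetical; see add_designed_pdb and add_complex
# for the full field definitions):
#
#   complex_structure_definition_pair = dict(
#       Structure = dict(
#           db_id = '1XYZ',
#           # ... fields accepted by kddg.api.data:add_designed_pdb ...
#       ),
#       Complex = dict(
#           structure_id = '1XYZ',   # must equal Structure['db_id']
#           LChains = ['A'],
#           RChains = ['B'],
#           # ... fields accepted by ppi_api:add_complex ...
#       ),
#   )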
def lookup_pdb_set(self, tsession, passed_pdb_set, allow_partial_matches = True, complex_id = None):
'''Takes a dict {'L' -> List(Tuple(PDB ID, Chain ID)), 'R' -> List(Tuple(PDB ID, Chain ID))} and returns all PDB
sets (complex_id, set_number, reverse_match) which have either partial or exact matches depending on
whether allow_partial_matches is True or False respectively. If reverse_match is True it means that the
partner definitions are reversed (left partner = right partner,...).
The matching is symmetric over the partner definitions i.e. if L1 matches R2 and R1 matches L2 then we consider this a match.
If complex_id is specified then we restrict matches to that particular ID (PPComplex.ID). Otherwise, all definitions
in the database are considered.
If allow_partial_matches is True then we return hits if there is at least one common chain in each partner.
Otherwise, we return hits if there are exact matches (modulo chain ordering)
'''
defined_sets = {}
if complex_id != None:
# Consider sets for a specific complex
defined_sets[complex_id] = {}
for r in tsession.query(dbmodel.PPIPDBPartnerChain).filter(dbmodel.PPIPDBPartnerChain.PPComplexID == complex_id):
set_number = r.SetNumber
defined_sets[complex_id][set_number] = defined_sets[complex_id].get(set_number, {'L' : [], 'R' : []})
defined_sets[complex_id][set_number][r.Side].append((r.PDBFileID, r.Chain))
else:
# Consider all sets
for r in tsession.query(dbmodel.PPIPDBPartnerChain):
set_number = r.SetNumber
c_id = r.PPComplexID
defined_sets[c_id] = defined_sets.get(c_id, {})
defined_sets[c_id][set_number] = defined_sets[c_id].get(set_number, {'L' : [], 'R' : []})
defined_sets[c_id][set_number][r.Side].append((r.PDBFileID, r.Chain))
set_number_hits = set()
for c_id, set_definitions in sorted(defined_sets.iteritems()):
for set_number, set_partners in sorted(set_definitions.iteritems()):
# Check for matches against the stored PDB sets. Check for the symmetric definition as well
if allow_partial_matches:
# Partial matching
if set(passed_pdb_set['L']).intersection(set_partners['L']) and set(passed_pdb_set['R']).intersection(set_partners['R']):
set_number_hits.add((c_id, set_number, False))
if set(passed_pdb_set['L']).intersection(set_partners['R']) and set(passed_pdb_set['R']).intersection(set_partners['L']):
set_number_hits.add((c_id, set_number, True))
else:
# Exact matching
if (sorted(passed_pdb_set['L']) == sorted(set_partners['L'])) and (sorted(passed_pdb_set['R']) == sorted(set_partners['R'])):
set_number_hits.add((c_id, set_number, False))
if (sorted(passed_pdb_set['L']) == sorted(set_partners['R'])) and (sorted(passed_pdb_set['R']) == sorted(set_partners['L'])):
set_number_hits.add((c_id, set_number, True))
if len(set([t[2] for t in set_number_hits])) > 1:
raise colortext.Exception('WARNING: the complex definition has at least two PDB sets where the left and right partners are in the reverse direction. This indicates a redundancy in the database.')
return sorted(set_number_hits)
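# Hedged usage sketch for lookup_pdb_set (the structure ID, chains, and the open transaction
# session `tsession` below are illustrative assumptions, not values from this module):
#
#   passed_pdb_set = {'L': [('1A2K_TP0', 'A')], 'R': [('1A2K_TP0', 'C')]}
#   for c_id, set_number, reverse_match in self.lookup_pdb_set(tsession, passed_pdb_set, allow_partial_matches = False):
#       print(c_id, set_number, reverse_match)  # e.g. (202, 0, False) if the set is stored with the same partner ordering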
def lookup_complex_by_details(self, tsession, complex_details, allow_partial_matches = True):
'''Takes a complex_details dict (as defined in add_complex) for a bound complex (i.e. a single PDB ID) and returns
the corresponding complex(es) and PDB set details if the defined complex exists in the database.
There are two paths. First, we check whether a complex exists with an exact match on all fields in the PPComplex
table. This case is probably only likely in the case where the same complex definition is being added repeatedly
e.g. if a data import script is being run over and over again. Next, we check whether a complex exists based on
the PDB set i.e. whether a complex using the same PDB chains exists in the database.
Note that this function will NOT detect cases where the same complex is being used as an existing complex in the
database but where there are differences in the partner names and a different PDB file is being specified. Therefore,
care must still be taken when adding complexes to the database to ensure that we do not store duplicate definitions.
This function is mainly useful as a helper function for add_complex to avoid hitting fail branches when force == False
in that function. It results in cleaner handling of attempts to re-add existing data.
Note: We ignore the ChainIndex field in PPIPDBPartnerChain - i.e. we treat partner definitions as bags, not sequences
Returns: a dict mapping:
complex_id -> Dict(reverse_match -> Boolean, # reverse_match is None, True, or False and indicates whether or not the matched complex names (L, R) are in the same order
set_numbers -> List(dict(set_number -> set_number, reverse_match = Boolean))) # reverse_match here is True or False and indicates whether or not the matched PDB sets (L, R) are in the same order
'''
# todo: this part of the function currently only allows bound complexes as there is a single structure_id parameter
# todo: this is the part of the code to change to allow the function to handle unbound complexes
passed_pdb_set = dict(
L = sorted([(complex_details['structure_id'], c) for c in complex_details['LChains']]),
R = sorted([(complex_details['structure_id'], c) for c in complex_details['RChains']])
)
complex_id = None
complex_reverse_match = None
# Try for an exact match
# This branch is only useful when the user is adding the same definition multiple times i.e. the same names for the complex.
# This is mostly hit when import scripts are run multiple times.
complex_record = get_or_create_in_transaction(tsession, dbmodel.PPComplex, complex_details, variable_columns = ['ID'], only_use_supplied_columns = True, read_only = True)
if complex_record:
results = [r for r in tsession.query(dbmodel.PPComplex).filter(and_(dbmodel.PPComplex.LName == complex_details['LName'], dbmodel.PPComplex.RName == complex_details['RName']))]
results += [r for r in tsession.query(dbmodel.PPComplex).filter(and_(dbmodel.PPComplex.LShortName == complex_details['LShortName'], dbmodel.PPComplex.RShortName == complex_details['RShortName']))]
results += [r for r in tsession.query(dbmodel.PPComplex).filter(and_(dbmodel.PPComplex.LHTMLName == complex_details['LHTMLName'], dbmodel.PPComplex.RHTMLName == complex_details['RHTMLName']))]
complex_ids = sorted(set([r.ID for r in results]))
if complex_ids:
if not len(complex_ids) == 1:
raise colortext.Exception('WARNING: Multiple complex definitions (PPComplex.ID = {0}) share the same partner names. This indicates a redundancy in the database.'.format(', '.join(map(str, complex_ids))))
complex_id = complex_ids[0]
complex_record = tsession.query(dbmodel.PPComplex).filter(dbmodel.PPComplex.ID == complex_id).one()
complex_reverse_match = False
results = [r for r in tsession.query(dbmodel.PPComplex).filter(and_(dbmodel.PPComplex.LName == complex_details['RName'], dbmodel.PPComplex.RName == complex_details['LName']))]
results += [r for r in tsession.query(dbmodel.PPComplex).filter(and_(dbmodel.PPComplex.LShortName == complex_details['RShortName'], dbmodel.PPComplex.RShortName == complex_details['LShortName']))]
results += [r for r in tsession.query(dbmodel.PPComplex).filter(and_(dbmodel.PPComplex.LHTMLName == complex_details['RHTMLName'], dbmodel.PPComplex.RHTMLName == complex_details['LHTMLName']))]
complex_ids = sorted(set([r.ID for r in results]))
if complex_ids:
if (complex_id != None) or (len(complex_ids) != 1):
raise colortext.Exception('WARNING: Multiple complex definitions (PPComplex.ID = {0}) share the same partner names. This indicates a redundancy in the database.'.format(', '.join(map(str, complex_ids))))
complex_id = complex_ids[0]
complex_record = tsession.query(dbmodel.PPComplex).filter(dbmodel.PPComplex.ID == complex_id).one()
complex_reverse_match = True
if complex_record:
# We found an associated PPComplex record. Now we check to see whether an associated PPIPDBSet exists
complex_id = complex_record.ID
# todo: this part of the function allows unbound complexes and does not need to be updated
set_number_hits = self.lookup_pdb_set(tsession, passed_pdb_set, allow_partial_matches = allow_partial_matches, complex_id = complex_id)
# One exact hit for the complex definition with one or many PDB sets
l = []
for h in set_number_hits:
assert(h[0] == complex_id)
set_number = h[1]
reverse_match = h[2]
assert(complex_reverse_match == reverse_match)
l.append(dict(set_number = set_number, reverse_match = reverse_match))
return {complex_id : dict(reverse_match = complex_reverse_match, set_numbers = l)}
else:
# The complex did not exactly match a PPComplex record however there may simply be differences in the partner names.
# We proceed by looking for a match based on the PDB chains by checking all PDB sets.
set_number_hits = self.lookup_pdb_set(tsession, passed_pdb_set, allow_partial_matches = allow_partial_matches)
results_by_complex = {}
for h in set_number_hits:
complex_id = h[0]
set_number = h[1]
reverse_match = h[2]
results_by_complex[complex_id] = results_by_complex.get(complex_id, dict(reverse_match = None, set_numbers = []))
results_by_complex[complex_id]['set_numbers'].append(dict(set_number = set_number, reverse_match = reverse_match))
return results_by_complex
return None
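# Illustrative shape of the mapping returned by lookup_complex_by_details (values are made up):
#
#   {202: {'reverse_match': False,
#          'set_numbers': [{'set_number': 0, 'reverse_match': False}]}}
#
# i.e. complex #202 matched with the partners in the same order, via PDB set 0.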
@ppi_data_entry
def add_complex(self, complex_details, keywords = [], force = False, debug = False, tsession = None):
'''Add a complex to the database using a defined dict structure.
:param complex_details: A dict fitting the defined structure (see below).
:param keywords: A list of keywords used to search existing complexes for an existing match. Not necessary but
advised, particularly when adding a small number of complexes.
:param force: If a potentially similar complex is found and force is False then the function returns with a
message and without adding the complex. The ForceAddition setting in the Complex dict (see below)
will have the same effect as setting this variable.
:param debug: If debug is set to True then the transaction used to insert the complex into the database will be
rolled back and a message stating that the insertion would have been successful is returned in the
return dict.
:return: On successful import, the dict
{success = True, ComplexID -> Long, SetNumber -> Long, ReverseMatch -> Boolean}
corresponding to the database PPIPDBSet primary key is returned. ReverseMatch is True if the complex was
found in the database with the same partner ordering (Left = Left, Right = Right) and False otherwise.
If a similar complex is detected and force is False then a dict
{success = False, ComplexID -> Long, SetNumber -> Long, ReverseMatch -> Boolean, message -> String}
will be returned instead.
On error, a dict {success = False, error -> String} is returned.
The database stores these strings as Unicode, allowing us to use e.g. Greek characters.
For this reason, please keep all structure definitions in a UTF-8-encoded file. On Linux, you can add the
# -*- coding: utf-8 -*-
declaration at the top of the file (with no leading whitespace).
One example of the dict structure is as follows:
dict(
# There are two cases - the complex exists in the database or we will be adding a new complex.
# Note: Before adding new complexes, you should make sure that there is no existing complex in the
# database. This will help to reduce redundancy and provide us with better data.
# These fields are required in both cases and specify the partners of the complex
# Note: Please ensure that the LChains (resp. RChains) chains correspond to the protein/complex
# identified by LName, LShortName, LHTMLName (resp. RName, RShortName, RHTMLName)
structure_id = '1A2K_TP0',
LChains = ['A'],
RChains = ['C'],
# Case 1: These fields should be used if there is an existing complex in the database.
ComplexID = 202,
# Case 2: These fields should only be used if there is no existing complex in the database.
AdditionalKeywords = ['GSP1'], # Used to search for existing complexes. The PDB ID, LName, LShortName, etc. fields will automatically be used for the search so there is no need to specify those.
LName = 'Ras-related nuclear protein', # the full protein name for the left partner. This is a Unicode field.
LShortName = 'RAN', # the short-hand name commonly used
LHTMLName = 'RAN', # a version of the short-hand name converted to HTML e.g. α used in place of an alpha character. This is an ASCII field.
RName = 'Ran-specific GTPase-activating protein', # similar
RShortName = 'RanGAP1', # similar
RHTMLName = 'RanGAP1', # similar
FunctionalClassID = 'OG', # One of A (Antibody-antigen), AB (Antigen/Bound Antibody), EI (Enzyme/inhibitor), ER (Enzyme containing complex),
# ES (Enzyme containing complex), OG (G-proteins), OR (Receptors), or OX (Miscellaneous)
PPDBMFunctionalClassID = 'O', # One of A (Antibody-antigen), AB (Antigen/Bound Antibody), E (Enzyme/Inhibitor or Enzyme/Substrate), or O (Miscellaneous)
PPDBMDifficulty = None, # specific to the protein-protein docking benchmark i.e. use None here
IsWildType = True, # if this is the wildtype sequence
WildTypeComplexID = None, # if this is not wildtype sequence and the wildtype complex is in the database, please specify that complex ID here
Notes = '...' # any notes on the complex e.g. 'There is a related complex in the database (complex #119 at the time of writing) with all three unique chains from 1K5D (AB|C).'
Warnings = None, # any warnings about the complex in general. Note: Structural warnings belong in the description field of the Structure dict.
# Optional fields for either case
PDBComplexNotes = '...' # any notes specific to the particular PDB structure rather than the complex
DatabaseKeys = [ # Used when adding complexes from databases to help map them back to that database
dict(
DatabaseName = "SKEMPI",
DatabaseKey = "1NCA_N_LH",
),
...
]
)
'''
# todo: this function currently only adds bound complexes (which is the typical case). It is straightforward to generalize the structure above for unbound complexes e.g. by changing LChains and RChains to include structure ids
existing_session = not(not(tsession))
tsession = tsession or self.importer.get_session(new_session = True, utf = True)
# Search for exact matches first, then partial matches
pp_complex = None
reverse_match = None
for match_param in [False, True]:
existing_complexes = self.lookup_complex_by_details(tsession, complex_details, allow_partial_matches = match_param)
if existing_complexes:
if len(existing_complexes) == 1:
existing_complex_id = existing_complexes.keys()[0]
pp_complex = tsession.query(dbmodel.PPComplex).filter(dbmodel.PPComplex.ID == existing_complex_id).one()
if 'ComplexID' in complex_details:
if complex_details['ComplexID'] != pp_complex.ID:
raise colortext.Exception('ComplexID {0} was passed but complex #{1} was found which seems to match the complex definition.'.format(complex_details['ComplexID'], pp_complex.ID))
reverse_match = existing_complexes[existing_complex_id]['reverse_match']
existing_pdb_sets = existing_complexes[existing_complex_id]['set_numbers']
if existing_pdb_sets:
if len(existing_pdb_sets) == 1:
existing_pdb_set = existing_pdb_sets[0]
msg = None
if match_param == True:
msg = 'A match was found on the partner/PDB set definition but the complex fields had different values e.g. different names of each partner.'
if not force:
return dict(success = False, message = msg, ComplexID = existing_complex_id, SetNumber = existing_pdb_set['set_number'], ReverseMatch = existing_pdb_set['reverse_match'])
else:
colortext.warning(msg)
return dict(success = True, message = msg, ComplexID = existing_complex_id, SetNumber = existing_pdb_set['set_number'], ReverseMatch = existing_pdb_set['reverse_match'])
return dict(success = True, ComplexID = existing_complex_id, SetNumber = existing_pdb_set['set_number'], ReverseMatch = existing_pdb_set['reverse_match'])
else:
raise colortext.Exception('The complex definition exists in the database but multiple PDB sets / partner definitions match the passed parameters. Check this case manually.')
else:
# If force is not passed, raise an exception. Else, cascade into the new partner definition creation below.
if not force:
raise colortext.Exception('The complex definition exists in the database although no PDB sets / partner definitions corresponding EXACTLY to the partner definition were found. Check this case manually to see whether existing definitions would suit better than the passed definition (else, the force parameter can be passed to force creation of a new definition).')
else:
raise colortext.Exception('Multiple complex definitions exist in the database which match the passed complex definition. Check this case manually.')
# At this point, either no exact match was found or (if force == True) a similar match has been found.
# If force is False and a similar complex was found, we should have raised an exception above.
try:
assert('DatabaseKeys' not in complex_details) # todo: write this code
# Check parameters
passed_keys = sorted(complex_details.keys())
expected_keys = ['structure_id', 'LChains', 'RChains']
for k in expected_keys:
assert(k in complex_details)
structure_id, LChains, RChains = complex_details['structure_id'], complex_details['LChains'], complex_details['RChains']
# Check that the structure is already in the database
structure_record = None
try:
structure_record = tsession.query(dbmodel.PDBFile).filter(dbmodel.PDBFile.ID == structure_id).one()
except:
raise Exception('The structure "{0}" does not exist in the database.'.format(structure_id))
# Add the PPComplex record
if pp_complex:
if reverse_match == True:
raise Exception('Write this case. We should add the passed chains in the opposite order (L = R, R = L) since the found complex has the opposite partner ordering.')
else:
assert(force)
assert(reverse_match == False) # i.e. it is not equal to None
else:
pp_complex = None
if 'ComplexID' in complex_details:
expected_keys.append('ComplexID')
if (('PDBComplexNotes' in complex_details) and len(complex_details) != 5) or (('PDBComplexNotes' not in complex_details) and (len(complex_details) != 4)):
raise Exception('As the ComplexID was specified, the only expected fields were "{0}" but "{1}" were passed.'.format('", "'.join(sorted(expected_keys)), '", "'.join(passed_keys)))
pp_complex = tsession.query(dbmodel.PPComplex).filter(dbmodel.PPComplex.ID == complex_details['ComplexID']).one()
else:
keywords = keywords + [complex_details['LName'], complex_details['LShortName'], complex_details['LHTMLName'], complex_details['RName'], complex_details['RShortName'], complex_details['RHTMLName']]
if complex_details.get('AdditionalKeywords'):
keywords.extend(complex_details['AdditionalKeywords'])
possible_matches = self.find_complex([structure_id], keywords, tsession = tsession)
if possible_matches:
if not force:
return dict(success = False, debug = debug, error = 'Complexes exist in the database which may be related. Please check whether any of these complexes match your case.', possible_matches = possible_matches)
colortext.warning('Complexes exist in the database which may be related. Continuing to add a new complex regardless.')
pp_complex = get_or_create_in_transaction(tsession, dbmodel.PPComplex, dict(
LName = complex_details['LName'],
LShortName = complex_details['LShortName'],
LHTMLName = complex_details['LHTMLName'],
RName = complex_details['RName'],
RShortName = complex_details['RShortName'],
RHTMLName = complex_details['RHTMLName'],
FunctionalClassID = complex_details['FunctionalClassID'],
PPDBMFunctionalClassID = complex_details['PPDBMFunctionalClassID'],
PPDBMDifficulty = complex_details['PPDBMDifficulty'],
IsWildType = complex_details['IsWildType'],
WildTypeComplexID = complex_details['WildTypeComplexID'],
Notes = complex_details['Notes'],
Warnings = complex_details['Warnings'],
), missing_columns = ['ID'])
# Search for an existing PDB set. Read the current definitions, treating them as bags then sorting lexically
pdb_sets = {}
for pschain in tsession.query(dbmodel.PPIPDBPartnerChain).filter(dbmodel.PPIPDBPartnerChain.PPComplexID == pp_complex.ID):
pdb_sets[pschain.SetNumber] = pdb_sets.get(pschain.SetNumber, {'L' : [], 'R' : []})
pdb_sets[pschain.SetNumber][pschain.Side].append((pschain.PDBFileID, pschain.Chain))
# Create a bag from the new definition then sort lexically
new_pdb_set = dict(L = sorted([(structure_id, c) for c in LChains]),
R = sorted([(structure_id, c) for c in RChains]))
# Check whether an exact match already exists
matching_set, reverse_match = None, None
for set_number, set_def in pdb_sets.iteritems():
set_def['L'] = sorted(set_def['L'])
set_def['R'] = sorted(set_def['R'])
if set_def['L'] == new_pdb_set['L'] and set_def['R'] == new_pdb_set['R']:
matching_set, reverse_match = True, False
break # stop iterating so that set_number below still refers to the matching set
elif set_def['L'] == new_pdb_set['R'] and set_def['R'] == new_pdb_set['L']:
matching_set, reverse_match = True, True
break # stop iterating so that set_number below still refers to the matching set
if matching_set:
pdb_set = tsession.query(dbmodel.PPIPDBSet).filter(and_(dbmodel.PPIPDBSet.PPComplexID == pp_complex.ID, dbmodel.PPIPDBSet.SetNumber == set_number)).one()
return dict(success = True, ReverseMatch = reverse_match, ComplexID = pp_complex.ID, SetNumber = set_number) # this used to also return PPIPDBSet = pdb_set
# No match. Create a new set by adding a PPIPDBSet record.
if pdb_sets:
new_set_number = max(pdb_sets.keys()) + 1
else:
new_set_number = 0
assert(tsession.query(dbmodel.PPIPDBSet).filter(and_(dbmodel.PPIPDBSet.PPComplexID == pp_complex.ID, dbmodel.PPIPDBSet.SetNumber == new_set_number)).count() == 0) # Sanity check
pdb_complex_notes = None
if 'PDBComplexNotes' in complex_details:
pdb_complex_notes = complex_details['PDBComplexNotes']
pdb_set_object = get_or_create_in_transaction(tsession, dbmodel.PPIPDBSet,
dict(
PPComplexID = pp_complex.ID,
SetNumber = new_set_number,
IsComplex = True, # todo: change when we allow unbound complexes
Notes = pdb_complex_notes,
))
# Create the associated PPIPDBPartnerChain records
for set_side, side_chains in sorted(new_pdb_set.iteritems()):
chain_index = 0
for pc in sorted(side_chains):
get_or_create_in_transaction(tsession, dbmodel.PPIPDBPartnerChain,
dict(
PPComplexID = pp_complex.ID,
SetNumber = new_set_number,
Side = set_side,
ChainIndex = chain_index,
PDBFileID = pc[0],
Chain = pc[1],
NMRModel = None, # todo
), missing_columns = ['ID'])
chain_index += 1
# Return the API response
api_response = dict(success = True, ReverseMatch = False, PPIPDBSet = pdb_set_object, ComplexID = pp_complex.ID, SetNumber = new_set_number) # this used to also return PPIPDBSet = pdb_set_object
if not(existing_session):
if debug:
api_response = dict(success = False, debug = debug, error = 'Debug call - rolling back the transaction.')
tsession.rollback()
tsession.close()
else:
tsession.commit()
tsession.close()
return api_response
except:
colortext.error('Failure.')
print(traceback.format_exc())
tsession.rollback()
tsession.close()
raise
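# Hedged usage sketch for add_complex (mirrors the workflow shown in the add_ssm_dataset
# docstring below; the password file, JSON path, and keyword are illustrative assumptions):
#
#   ppi_api = get_ppi_interface(read_file('pw'))
#   complex_definition = json.loads(read_file('my_complex.json'))  # structured as in the docstring above
#   api_response = ppi_api.add_complex(complex_definition, keywords = ['GSP1'], debug = True)  # dry run first, then re-run with debug = False
#   if api_response['success']:
#       print(api_response['ComplexID'], api_response['SetNumber'], api_response['ReverseMatch'])
#   else:
#       print(api_response.get('error') or api_response.get('message'))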
@ppi_data_entry
def add_user_dataset_case(self, tsession, user_dataset_case, user_dataset_name_to_id_map = {}):
'''Add a user dataset case to the database using a defined dict structure.
:param tsession: A transaction session. This must be created and passed into this function as user datasets should
be added in one transaction.
:param user_dataset_case: A single case for the user dataset matching the structure defined below.
:param user_dataset_name_to_id_map: Used to cache the mapping from user dataset names to their integer IDs
:return: On success, the UserDataSetExperiment object is returned.
user_dataset_case should be structured as in the following example:
dict(
# These records are used to create a PPMutagenesis record and the associated mutagenesis details
Mutagenesis = dict(
RecognizableString = 'TinaGSP_32',
PPComplexID = -1,
),
Mutations = [
# There is one dict per mutation
dict(
MutagenesisMutation = dict(
# PPMutagenesisID will be filled in when the PPMutagenesis record is created.
RecordKey = 'A D123E', # chain_id, wildtype_aa, residue_id.strip(), mutant_aa
ProteinID = None, # todo
ResidueIndex = None, # todo
WildTypeAA = 'D',
MutantAA = 'E',
),
MutagenesisPDBMutation = dict(
# PPMutagenesisID and PPMutagenesisMutationID will be filled in when the PPMutagenesisMutation record is created.
# PPComplexID is taken from the PPMutagenesis section. WildTypeAA and MutantAA are taken from the PPMutagenesisMutation section.
SetNumber = -1,
PDBFileID = '1A2K_TP0',
Chain = 'A',
ResidueID = ' 123 ',
),
),
],
# This field is used to create the UserPPDataSetExperiment record. All other fields can be derived from the above.
# Note: We use the human-readable label here. The database ID is retrieved using e.g. ppi_api.get_defined_user_datasets()[<UserDataSetTextID>]['ID']
UserDataSetTextID = 'RAN-GSP',
)
'''
udc = user_dataset_case
# Extract the PDB file and complex set number
pdb_file_id = set([m['MutagenesisPDBMutation']['PDBFileID'] for m in udc['Mutations']])
assert(len(pdb_file_id) == 1)
pdb_file_id = pdb_file_id.pop()
set_number = set([m['MutagenesisPDBMutation']['SetNumber'] for m in udc['Mutations']])
assert(len(set_number) == 1)
set_number = set_number.pop()
is_wildtype = 1
if udc['Mutations']:
is_wildtype = 0
# 1. Create the mutagenesis record
pp_mutagenesis = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesis, dict(
PPComplexID = udc['Mutagenesis']['PPComplexID'],
SKEMPI_KEY = udc['Mutagenesis']['RecognizableString'],
WildType = is_wildtype,
), missing_columns = ['ID'])
# 2. Create the PPMutagenesisMutation and PPMutagenesisPDBMutation records
for m in udc['Mutations']:
# 2a. Create the PPMutagenesisMutation record
mmut = m['MutagenesisMutation']
mmut['PPMutagenesisID'] = pp_mutagenesis.ID
# Sanity check existing records
existing_record = tsession.query(dbmodel.PPMutagenesisMutation).filter(and_(
dbmodel.PPMutagenesisMutation.PPMutagenesisID == mmut['PPMutagenesisID'], dbmodel.PPMutagenesisMutation.RecordKey == mmut['RecordKey']))
if existing_record.count() > 0:
existing_record = existing_record.one()
assert(existing_record.MutantAA == mmut['MutantAA'])
assert(existing_record.WildTypeAA == mmut['WildTypeAA'])
# Add the new record
pp_mutagenesis_mutation = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesisMutation, mmut, missing_columns = ['ID'])
# 2b. Create the PPMutagenesisPDBMutation record
pmut = m['MutagenesisPDBMutation']
pmut['PPMutagenesisID'] = pp_mutagenesis.ID
pmut['PPMutagenesisMutationID'] = pp_mutagenesis_mutation.ID
pmut['PPComplexID'] = pp_mutagenesis.PPComplexID
pmut['WildTypeAA'] = pp_mutagenesis_mutation.WildTypeAA
pmut['MutantAA'] = pp_mutagenesis_mutation.MutantAA
pmut['ResidueID'] = PDB.ResidueID2String(pmut['ResidueID']) # handle stripped strings
# Sanity check existing records
existing_record = tsession.query(dbmodel.PPMutagenesisPDBMutation).filter(and_(
dbmodel.PPMutagenesisPDBMutation.PPMutagenesisMutationID == pmut['PPMutagenesisMutationID'],
dbmodel.PPMutagenesisPDBMutation.PDBFileID == pdb_file_id,
dbmodel.PPMutagenesisPDBMutation.SetNumber == set_number,
dbmodel.PPMutagenesisPDBMutation.Chain == pmut['Chain'],
dbmodel.PPMutagenesisPDBMutation.ResidueID == pmut['ResidueID'],
))
if existing_record.count() > 0:
existing_record = existing_record.one()
assert(existing_record.PPMutagenesisID == pmut['PPMutagenesisID'])
assert(existing_record.PPComplexID == pmut['PPComplexID'])
assert(existing_record.WildTypeAA == pmut['WildTypeAA'])
assert(existing_record.MutantAA == pmut['MutantAA'])
# Add the new record
pp_mutagenesis_pdb_mutation = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesisPDBMutation, pmut, missing_columns = ['ID'])
# 3. Create the UserPPDataSetExperiment record
user_dataset_name = udc['UserDataSetTextID']
if not user_dataset_name_to_id_map.get(user_dataset_name):
user_dataset_name_to_id_map[user_dataset_name] = tsession.query(dbmodel.UserDataSet).filter(dbmodel.UserDataSet.TextID == user_dataset_name).one().ID
user_dataset_id = user_dataset_name_to_id_map[user_dataset_name]
new_record = True
if tsession.query(dbmodel.UserPPDataSetExperiment).filter(and_(
dbmodel.UserPPDataSetExperiment.UserDataSetID == user_dataset_id,
dbmodel.UserPPDataSetExperiment.PPMutagenesisID == pp_mutagenesis.ID,
dbmodel.UserPPDataSetExperiment.PDBFileID == pdb_file_id,
dbmodel.UserPPDataSetExperiment.PPComplexID == pp_mutagenesis.PPComplexID,
dbmodel.UserPPDataSetExperiment.SetNumber == set_number)).count() > 0:
new_record = False
user_dataset_experiment = get_or_create_in_transaction(tsession, dbmodel.UserPPDataSetExperiment, dict(
UserDataSetID = user_dataset_id,
PPMutagenesisID = pp_mutagenesis.ID,
PDBFileID = pdb_file_id,
PPComplexID = pp_mutagenesis.PPComplexID,
SetNumber = set_number,
IsComplex = True,
), missing_columns = ['ID'])
if new_record:
colortext.wgreen('.')
else:
colortext.wcyan('.')
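# Hedged sketch of driving add_user_dataset_case inside a single transaction, as the docstring
# above requires (the case dicts are assumed to follow the structure documented above):
#
#   tsession = ppi_api.importer.get_session(new_session = True, utf = True)
#   try:
#       for case in user_dataset_cases:
#           ppi_api.add_user_dataset_case(tsession, case)
#       tsession.commit()
#   except:
#       tsession.rollback()
#       raise
#   finally:
#       tsession.close()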
@general_data_entry
def add_de_dataset(self, user_id, long_id, short_id, description, ddg_convention, dataset_creation_start_date = None, dataset_creation_end_date = None, publication_ids = [], existing_session = None):
'''Convenience wrapper for add_dataset for DeltaE-only datasets.'''
return self.add_dataset(user_id, long_id, short_id, description, False, False, True, ddg_convention, dataset_creation_start_date = dataset_creation_start_date, dataset_creation_end_date = dataset_creation_end_date, publication_ids = publication_ids, existing_session = existing_session)
@ppi_data_entry
def add_ssm_dataset(self, dataset_short_id, user_dataset_id, complex_id, set_number, mutations_dataframe, existing_session = None, debug = True):
'''Import SSM data from an RCSB PDB file. Non-RCSB files are not currently handled. Some data (DataSet and UserDataSet)
must be set up before calling this function.
:param dataset_short_id: The short ID of the existing dataset in the database (DataSet.ShortID)
:param user_dataset_id: The ID of the existing user dataset in the database (UserDataSet.ID)
:param complex_id: The complex ID used in the database (PPComplex.ID). This will be used to add the structure to the database.
:param set_number: The set_number of the complex used in the database (PPIPDBSet.SetNumber). This is used to determine the choice of chains in predictions.
:param mutations_dataframe: A pandas dataframe in the intermediate input format described below.
:param debug: If True then the transaction is rolled back. This is set to True by default to reduce data-entry errors i.e. you should do a test-run of add_ssm_dataset first and then do a run with debug = False.
:return: Dict {success : <True/False>, DataSetID : dataset_id, [errors : <list of error strings if failed>]}
This function requires the complex, DataSet, and UserDataSet records to have been created. Those records can be added using
the appropriate functions e.g.
ppi_api = get_ppi_interface(read_file('pw'))
# If the complex structure has not been added to the database:
ppi_api.importer.add_pdb_from_rcsb(pdb_id, trust_database_content = True)
# If the complex has not been added to the database:
complex_ids = ppi_api.search_complexes_by_pdb_id(pdb_id)
if complex_ids:
colortext.warning('The PDB file {0} has associated complexes: {1}'.format(pdb_id, ', '.join(map(str, complex_ids))))
api_response = ppi_api.add_complex(json.loads(read_file('my_complex.json')[path][to][complex_definition])) # The structure of the JSON file is described in the docstring for add_complex
if not api_response['success']:
raise Exception(api_response['error'])
pp_complex_id, set_number = api_response['ComplexID'], api_response['SetNumber']
# else if the complex already exists in the database:
pp_complex_id, set_number = ..., ...
# Add dataset publications
publication_ids = [
ppi_api.add_publication(...).ID, # currently not implemented
...
ppi_api.add_publication(...).ID, # currently not implemented
]
# Add the dataset and user dataset records
dataset = ppi_api.add_de_dataset('oconchus', 'SSM_Psd95-CRIPT_Rama_10.1038/nature11500', 'Psd95-CRIPT', 'description...', ddg_convention, dataset_creation_start_date = datetime.date(...), dataset_creation_end_date = datetime.date(...), publication_ids = [...])
user_dataset = ppi_api.add_de_user_dataset('oconchus', 'SSM-Psd95-CRIPT', '...')
# Finally, import the SSM dataset
add_ssm_dataset(dataset.ShortID, user_dataset.ID, pp_complex_id, set_number, mutations_dataframe)
@todo: write the add_publication function (using the RIS parsing module in klab and the PubMed/DOI downloading modules).
mutations_dataframe should be a complete (either a value or null at all positions in the m x n array) pandas
dataframe with a standardized structure.
This simplifies the data import. The dataframe should be indexed/row-indexed by residue type and column-indexed
by a string chain ID + <underscore> + residue ID without spaces e.g. 'A_311' is residue ' 311 ' of chain A and 'A_312B' is residue ' 312B' of chain A.
We include an underscore in the format to reduce confusion for cases where the PDB chain ID is an integer.
For example, if the input file is a TSV formatted like:
Pos/aa A_311 A_312 ...
A 0.131 -0.42 ...
C 0.413 -0.022 ...
...
then a valid mutations_dataframe can be constructed via
mutations_dataframe = pandas.read_csv(ssm_input_data_path, sep = '\t', header = 0, index_col = 0)
'''
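# Minimal, hedged sketch of building a valid mutations_dataframe in memory (truncated to two
# residue types purely for illustration; a real dataframe must cover all 20 canonical residues):
#
#   import io, pandas
#   tsv = u'Pos/aa\tA_311\tA_312\nA\t0.131\t-0.42\nC\t0.413\t-0.022\n'
#   mutations_dataframe = pandas.read_csv(io.StringIO(tsv), sep = '\t', header = 0, index_col = 0)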
tsession = existing_session or self.get_session(new_session = True, utf = False)
# Sanity checks
assert(complex_id != None and set_number != None)
dataset_id = None
try:
dataset_id = tsession.query(dbmodel.DataSet).filter(dbmodel.DataSet.ShortID == dataset_short_id).one().ID
except:
raise Exception('No dataset with ShortID "{0}" exists in the database.'.format(dataset_short_id))
try:
tsession.query(dbmodel.UserDataSet).filter(dbmodel.UserDataSet.ID == user_dataset_id).one()
except:
raise Exception('No user dataset with ID "{0}" exists in the database.'.format(user_dataset_id))
# Retrieve the mapping from chain -> residue ID -> wildtype residue
pdb_id, complex_chains = self.get_bound_pdb_set_details(complex_id, set_number)
chain_wt_residue_by_pos = self.get_pdb_residues_by_pos(pdb_id, strip_res_ids = True)
# Sanity checks on column indices
chain_ids = set()
for v in mutations_dataframe.columns.values:
error_msg = 'The column index "{0}" does not have the expected format: <chain>_<residue id> e.g. "A_123".'.format(v)
if v.find('_') == -1 or len(v.split('_')) != 2:
raise colortext.Exception(error_msg)
tokens = v.split('_')
chain_id = tokens[0]
residue_id = tokens[1]
if len(chain_id) != 1 or (not(residue_id.strip().isdigit()) and not(residue_id.strip()[:-1].isdigit())):
raise colortext.Exception(error_msg)
chain_ids.add(chain_id)
# Sanity checks on row indices
mut_aas = sorted(mutations_dataframe.index)
expected_mut_aas = set(residue_type_1to3_map.keys())
expected_mut_aas.remove('X')
assert(len(expected_mut_aas) == 20)
if set(mut_aas).difference(expected_mut_aas):
raise colortext.Exception('The row indices contain values which are non canonical residue types: "{0}".'.format('", "'.join(sorted(set(mut_aas).difference(expected_mut_aas)))))
# Extract the data into a list of point mutations, iterating by column/position then row/AA
# Add a single wildtype PPMutagenesis record (essentially a Complex with no corresponding mutation records)
# For all single PDB mutations in the list
# if not wildtype
# add a PPMutagenesis record and corresponding mutation records
# add a PPIDataSetDE record to represent the original data (experimental data) in the database
# add a UserPPDataSetExperiment record to be used to create prediction runs
# add a UserPPAnalysisSetDE record to be used when analyzing prediction runs against the experimental data
#
# Note that there will be one PPMutagenesis record for each mutant but only one PPMutagenesis record for wildtype, even though
# the wildtype sequence has exactly one corresponding DeltaE for each position. There will be exactly one UserPPAnalysisSetDE
# record per mutant and one wildtype UserPPAnalysisSetDE record for each position; however, all of the wildtype UserPPAnalysisSetDE
# records share the same record number and are associated with the sole wildtype PPMutagenesis/UserPPDataSetExperiment record.
colortext.warning('Adding data for complex #{0}, dataset "{1}", user dataset #{2}.'.format(complex_id, dataset_id, user_dataset_id))
record_number = 0
mut_aas = list(mutations_dataframe.index)
res_ids = list(mutations_dataframe.columns.values)
try:
# Add a PPMutagenesis record with no mutation records i.e. the wildtype/null 'mutagenesis'
pp_wt_mutagenesis = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesis, dict(
PPComplexID = complex_id,
SKEMPI_KEY = 'SSM {0}| WildType'.format(pdb_id), # todo: this format is ambiguous if we start to store multiple SSM datasets with different choices of bound partners. We should ideally check all PPMutagenesisMutation/PPMutagenesisPDBMutation records on the complex for a match. At present (2016), it is unlikely that we will have many SSM datasets for consideration, never mind overlapping sets.
WildType = 1,
), missing_columns = ['ID',])
pp_wt_mutagenesis_id = pp_wt_mutagenesis.ID
first_wt_record_number = None
for chain_res_id in res_ids:
tokens = chain_res_id.split('_')
assert(len(tokens) == 2)
chain_id = tokens[0]
assert(len(chain_id) == 1)
assert(chain_id in chain_wt_residue_by_pos)
res_id = tokens[1]
assert(res_id in chain_wt_residue_by_pos[chain_id])
wt_aa = chain_wt_residue_by_pos[chain_id][res_id]
for mut_aa in mut_aas:
record_number += 1
if record_number % 10 == 0:
colortext.wgreen('.')
sys.stdout.flush()
# Add the PPMutagenesis records for mutant cases
if mut_aa == wt_aa:
ppi_dataset_de_key = 'SSM {0}| WildType'.format(pdb_id)
if first_wt_record_number == None:
first_wt_record_number = record_number
analysis_set_record_number = first_wt_record_number
pp_mutagenesis_id = pp_wt_mutagenesis_id
else:
ppi_dataset_de_key = 'SSM {0}| {1} {2} {3} {4}'.format(pdb_id, chain_id, wt_aa, res_id, mut_aa) # SKEMPI_KEY is a bad name for a field!,
analysis_set_record_number = record_number
# Add a PPMutagenesis record with no mutation records i.e. the wildtype/null 'mutagenesis'
pp_mutagenesis = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesis, dict(
PPComplexID = complex_id,
SKEMPI_KEY = 'SSM {0}| {1} {2} {3} {4}'.format(pdb_id, chain_id, wt_aa, res_id, mut_aa), # SKEMPI_KEY is a bad name for a field!,
WildType = 0,
), missing_columns = ['ID'])
pp_mutagenesis_id = pp_mutagenesis.ID
#pprint.pprint(pp_mutagenesis.__dict__)
pp_mutagenesis_mutation = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesisMutation, dict(
PPMutagenesisID = pp_mutagenesis_id,
RecordKey = '{0} {1}{2}{3}'.format(chain_id, wt_aa, res_id, mut_aa),
ProteinID = None,
ResidueIndex = None,
WildTypeAA = wt_aa,
MutantAA = mut_aa,
), missing_columns = ['ID',])
pp_mutagenesis_mutation_id = pp_mutagenesis_mutation.ID
#pprint.pprint(pp_mutagenesis_mutation.__dict__)
pp_mutagenesis_pdb_mutation = get_or_create_in_transaction(tsession, dbmodel.PPMutagenesisPDBMutation, dict(
PPMutagenesisID = pp_mutagenesis_id,
PPMutagenesisMutationID = pp_mutagenesis_mutation_id,
PPComplexID = complex_id,
SetNumber = set_number,
PDBFileID = pdb_id,
Chain = chain_id,
WildTypeAA = wt_aa,
ResidueID = PDB.ResidueID2String(res_id),
MutantAA = mut_aa,
), missing_columns = ['ID',])
pp_mutagenesis_pdb_mutation_id = pp_mutagenesis_pdb_mutation.ID
#pprint.pprint(pp_mutagenesis_pdb_mutation.__dict__)
# Add a DeltaE measurement record (PPIDataSetDE)
ppi_dataset_de = get_or_create_in_transaction(tsession, dbmodel.PPIDataSetDE, dict(
SecondaryID = ppi_dataset_de_key, # optional field
DataSetID = dataset_id,
Section = 'Supplementary Information II',
RecordNumber = record_number,
DE = mutations_dataframe[chain_res_id][mut_aa],
DEUnit = 'DeltaE (see DataSet.Description)',
PublishedError = None,
NumberOfMeasurements = None,
PPMutagenesisID = pp_mutagenesis_id,
PPComplexID = complex_id,
SetNumber = set_number,
PublishedPDBFileID = pdb_id,
PossibleError = False,
Remarks = None,
IsABadEntry = 0,
AddedBy = 'oconchus',
AddedDate = datetime.datetime.now(),
LastModifiedBy = 'oconchus',
LastModifiedDate = datetime.datetime.now(),
), missing_columns = ['ID',], variable_columns = ['AddedDate', 'LastModifiedDate'])
ppi_dataset_de_id = ppi_dataset_de.ID
# Add a record (UserPPDataSetExperiment) to be included in the associated prediction run
user_pp_dataset_experiment = get_or_create_in_transaction(tsession, dbmodel.UserPPDataSetExperiment, dict(
UserDataSetID = user_dataset_id,
PPMutagenesisID = pp_mutagenesis_id,
PDBFileID = pdb_id,
PPComplexID = complex_id,
SetNumber = set_number,
IsComplex = 1
), missing_columns = ['ID',])
user_pp_dataset_experiment_id = user_pp_dataset_experiment.ID
# Add a record (UserPPAnalysisSetDE) to be used in the analysis, linking the UserPPDataSetExperiment with the DeltaE (PPIDataSetDE) record
user_pp_analysis_set_de = get_or_create_in_transaction(tsession, dbmodel.UserPPAnalysisSetDE, dict(
Subset = 'Psd95-Cript',
Section = 'McLaughlin2012',
RecordNumber = analysis_set_record_number,
UserPPDataSetExperimentID = user_pp_dataset_experiment_id,
PPIDataSetDEID = ppi_dataset_de_id,
PPMutagenesisID = pp_mutagenesis_id,
), missing_columns = ['ID',])
user_pp_analysis_set_de_id = user_pp_analysis_set_de.ID
if debug:
colortext.warning('\nDEBUG MODE IS SET. THE CODE RAN SUCCESSFULLY BUT THE DATASET WILL NOT BE ADDED. RE-RUN THIS FUNCTION WITH debug = False.')
tsession.rollback()
tsession.close()
else:
tsession.commit()
tsession.close()
except Exception, e:
tsession.rollback()
tsession.close()
colortext.warning(traceback.format_exc())
raise colortext.Exception(str(e))
|
Kortemme-Lab/kddg
|
kddg/api/ppi.py
|
Python
|
mit
| 189,894
|
[
"PyMOL"
] |
6dc727c32171db6915abfe738876e387a2b77bc227492542ecd237ee5270af7e
|
# -*- test-case-name: twisted.web.test.test_util -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An assortment of web server-related utilities.
"""
from __future__ import division, absolute_import
import linecache
from twisted.python import urlpath
from twisted.python.compat import _PY3, unicode, nativeString, escape
from twisted.python.reflect import fullyQualifiedName
from twisted.python.modules import getModule
from twisted.web import resource
from twisted.web.template import TagLoader, XMLFile, Element, renderer
from twisted.web.template import flattenString
def _PRE(text):
"""
Wraps <pre> tags around some text and HTML-escapes it.
This is here since once twisted.web.html was deprecated it was hard to
migrate the html.PRE from current code to twisted.web.template.
For new code consider using twisted.web.template.
@return: Escaped text wrapped in <pre> tags.
@rtype: C{str}
"""
return '<pre>%s</pre>' % (escape(text),)
def redirectTo(URL, request):
"""
Generate a redirect to the given location.
@param URL: A L{bytes} giving the location to which to redirect.
@type URL: L{bytes}
@param request: The request object to use to generate the redirect.
@type request: L{IRequest<twisted.web.iweb.IRequest>} provider
@raise TypeError: If the type of C{URL} is L{unicode} instead of L{bytes}.
@return: A C{bytes} containing HTML which tries to convince the client agent
to visit the new location even if it doesn't respect the I{FOUND}
response code. This is intended to be returned from a render method,
eg::
def render_GET(self, request):
return redirectTo(b"http://example.com/", request)
"""
if isinstance(URL, unicode) :
raise TypeError("Unicode object not allowed as URL")
request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
request.redirect(URL)
content = """
<html>
<head>
<meta http-equiv=\"refresh\" content=\"0;URL=%(url)s\">
</head>
<body bgcolor=\"#FFFFFF\" text=\"#000000\">
<a href=\"%(url)s\">click here</a>
</body>
</html>
""" % {'url': nativeString(URL)}
if _PY3:
content = content.encode("utf8")
return content
class Redirect(resource.Resource):
isLeaf = True
def __init__(self, url):
resource.Resource.__init__(self)
self.url = url
def render(self, request):
return redirectTo(self.url, request)
def getChild(self, name, request):
return self
class ChildRedirector(Redirect):
isLeaf = 0
def __init__(self, url):
# XXX is this enough?
if ((url.find('://') == -1)
and (not url.startswith('..'))
and (not url.startswith('/'))):
raise ValueError("It seems you've given me a redirect (%s) that is a child of myself! That's not good, it'll cause an infinite redirect." % url)
Redirect.__init__(self, url)
def getChild(self, name, request):
newUrl = self.url
if not newUrl.endswith('/'):
newUrl += '/'
newUrl += name
return ChildRedirector(newUrl)
class ParentRedirect(resource.Resource):
"""
I redirect to URLPath.here().
"""
isLeaf = 1
def render(self, request):
return redirectTo(urlpath.URLPath.fromRequest(request).here(), request)
def getChild(self, request):
return self
class DeferredResource(resource.Resource):
"""
I wrap up a Deferred that will eventually result in a Resource
object.
"""
isLeaf = 1
def __init__(self, d):
resource.Resource.__init__(self)
self.d = d
def getChild(self, name, request):
return self
def render(self, request):
self.d.addCallback(self._cbChild, request).addErrback(
self._ebChild,request)
from twisted.web.server import NOT_DONE_YET
return NOT_DONE_YET
def _cbChild(self, child, request):
request.render(resource.getChildForRequest(child, request))
def _ebChild(self, reason, request):
request.processingFailed(reason)
return reason
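# Hedged example of DeferredResource: wrap a Deferred that will eventually fire with a
# resource (the use of twisted.web.static.Data is an illustrative choice, not prescribed here):
#
#   from twisted.internet import defer
#   from twisted.web.static import Data
#   d = defer.Deferred()
#   wrapper = DeferredResource(d)              # can be placed in the resource tree immediately
#   d.callback(Data(b"hello", "text/plain"))   # later, supply the real resource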
class _SourceLineElement(Element):
"""
L{_SourceLineElement} is an L{IRenderable} which can render a single line of
source code.
@ivar number: A C{int} giving the line number of the source code to be
rendered.
@ivar source: A C{str} giving the source code to be rendered.
"""
def __init__(self, loader, number, source):
Element.__init__(self, loader)
self.number = number
self.source = source
@renderer
def sourceLine(self, request, tag):
"""
Render the line of source as a child of C{tag}.
"""
return tag(self.source.replace(' ', u' \N{NO-BREAK SPACE}'))
@renderer
def lineNumber(self, request, tag):
"""
Render the line number as a child of C{tag}.
"""
return tag(str(self.number))
class _SourceFragmentElement(Element):
"""
L{_SourceFragmentElement} is an L{IRenderable} which can render several lines
of source code near the line number of a particular frame object.
@ivar frame: A L{Failure<twisted.python.failure.Failure>}-style frame object
for which to load a source line to render. This is really a tuple
holding some information from a frame object. See
L{Failure.frames<twisted.python.failure.Failure>} for specifics.
"""
def __init__(self, loader, frame):
Element.__init__(self, loader)
self.frame = frame
def _getSourceLines(self):
"""
Find the source line referenced by C{self.frame} and yield, in source
line order, it and the previous and following lines.
@return: A generator which yields two-tuples. Each tuple gives a source
line number and the contents of that source line.
"""
filename = self.frame[1]
lineNumber = self.frame[2]
for snipLineNumber in range(lineNumber - 1, lineNumber + 2):
yield (snipLineNumber,
linecache.getline(filename, snipLineNumber).rstrip())
@renderer
def sourceLines(self, request, tag):
"""
Render the source line indicated by C{self.frame} and several
surrounding lines. The active line will be given a I{class} of
C{"snippetHighlightLine"}. Other lines will be given a I{class} of
C{"snippetLine"}.
"""
for (lineNumber, sourceLine) in self._getSourceLines():
newTag = tag.clone()
if lineNumber == self.frame[2]:
cssClass = "snippetHighlightLine"
else:
cssClass = "snippetLine"
loader = TagLoader(newTag(**{"class": cssClass}))
yield _SourceLineElement(loader, lineNumber, sourceLine)
class _FrameElement(Element):
"""
L{_FrameElement} is an L{IRenderable} which can render details about one
frame from a L{Failure<twisted.python.failure.Failure>}.
@ivar frame: A L{Failure<twisted.python.failure.Failure>}-style frame object
for which to load a source line to render. This is really a tuple
holding some information from a frame object. See
L{Failure.frames<twisted.python.failure.Failure>} for specifics.
"""
def __init__(self, loader, frame):
Element.__init__(self, loader)
self.frame = frame
@renderer
def filename(self, request, tag):
"""
Render the name of the file this frame references as a child of C{tag}.
"""
return tag(self.frame[1])
@renderer
def lineNumber(self, request, tag):
"""
Render the source line number this frame references as a child of
C{tag}.
"""
return tag(str(self.frame[2]))
@renderer
def function(self, request, tag):
"""
Render the function name this frame references as a child of C{tag}.
"""
return tag(self.frame[0])
@renderer
def source(self, request, tag):
"""
Render the source code surrounding the line this frame references,
replacing C{tag}.
"""
return _SourceFragmentElement(TagLoader(tag), self.frame)
class _StackElement(Element):
"""
L{_StackElement} renders an L{IRenderable} which can render a list of frames.
"""
def __init__(self, loader, stackFrames):
Element.__init__(self, loader)
self.stackFrames = stackFrames
@renderer
def frames(self, request, tag):
"""
Render the list of frames in this L{_StackElement}, replacing C{tag}.
"""
return [
_FrameElement(TagLoader(tag.clone()), frame)
for frame
in self.stackFrames]
class FailureElement(Element):
"""
L{FailureElement} is an L{IRenderable} which can render detailed information
about a L{Failure<twisted.python.failure.Failure>}.
@ivar failure: The L{Failure<twisted.python.failure.Failure>} instance which
will be rendered.
@since: 12.1
"""
loader = XMLFile(getModule(__name__).filePath.sibling("failure.xhtml"))
def __init__(self, failure, loader=None):
Element.__init__(self, loader)
self.failure = failure
@renderer
def type(self, request, tag):
"""
Render the exception type as a child of C{tag}.
"""
return tag(fullyQualifiedName(self.failure.type))
@renderer
def value(self, request, tag):
"""
Render the exception value as a child of C{tag}.
"""
return tag(unicode(self.failure.value).encode('utf8'))
@renderer
def traceback(self, request, tag):
"""
Render all the frames in the wrapped
L{Failure<twisted.python.failure.Failure>}'s traceback stack, replacing
C{tag}.
"""
return _StackElement(TagLoader(tag), self.failure.frames)
def formatFailure(myFailure):
"""
Construct an HTML representation of the given failure.
Consider using L{FailureElement} instead.
@type myFailure: L{Failure<twisted.python.failure.Failure>}
@rtype: C{bytes}
@return: A string containing the HTML representation of the given failure.
"""
result = []
flattenString(None, FailureElement(myFailure)).addBoth(result.append)
if isinstance(result[0], bytes):
# Ensure the result string is all ASCII, for compatibility with the
# default encoding expected by browsers.
return result[0].decode('utf-8').encode('ascii', 'xmlcharrefreplace')
result[0].raiseException()
__all__ = [
"redirectTo", "Redirect", "ChildRedirector", "ParentRedirect",
"DeferredResource", "FailureElement", "formatFailure"]
|
bdh1011/wau
|
venv/lib/python2.7/site-packages/twisted/web/util.py
|
Python
|
mit
| 10,875
|
[
"VisIt"
] |
f4b9cf9120c14099dc7eb914713683fa7eb534eab96da95ce0cd3ce2dd16e3e6
|
# Copyright (c) 2004 Canonical Limited
# Author: Robert Collins <robert.collins@canonical.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
import sys
import logging
import unittest
class LogCollector(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.records=[]
def emit(self, record):
self.records.append(record.getMessage())
def makeCollectingLogger():
"""I make a logger instance that collects its logs for programmatic analysis
-> (logger, collector)"""
logger=logging.Logger("collector")
handler=LogCollector()
handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
logger.addHandler(handler)
return logger, handler
def visitTests(suite, visitor):
"""A foreign method for visiting the tests in a test suite."""
for test in suite._tests:
#Abusing types to avoid monkey patching unittest.TestCase.
# Maybe that would be better?
try:
test.visit(visitor)
except AttributeError:
if isinstance(test, unittest.TestCase):
visitor.visitCase(test)
elif isinstance(test, unittest.TestSuite):
visitor.visitSuite(test)
visitTests(test, visitor)
else:
print ("unvisitable non-unittest.TestCase element %r (%r)" % (test, test.__class__))
class TestSuite(unittest.TestSuite):
"""I am an extended TestSuite with a visitor interface.
This is primarily to allow filtering of tests - and suites or
more in the future. An iterator of just tests wouldn't scale..."""
def visit(self, visitor):
"""visit the composite. Visiting is depth-first.
current callbacks are visitSuite and visitCase."""
visitor.visitSuite(self)
visitTests(self, visitor)
class TestLoader(unittest.TestLoader):
"""Custome TestLoader to set the right TestSuite class."""
suiteClass = TestSuite
class TestVisitor(object):
"""A visitor for Tests"""
def visitSuite(self, aTestSuite):
pass
def visitCase(self, aTestCase):
pass
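# Hedged example of the visitor interface above: a visitor that collects tests whose id
# contains a substring (the class and module names here are illustrative, not part of this module):
#
#   class NameCollector(TestVisitor):
#       def __init__(self, substring):
#           self.matches = []
#           self.substring = substring
#       def visitCase(self, aTestCase):
#           if self.substring in aTestCase.id():
#               self.matches.append(aTestCase)
#
#   suite = TestLoader().loadTestsFromModule(some_test_module)
#   collector = NameCollector('smoke')
#   suite.visit(collector)
#   print(collector.matches)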
|
freyes/percona-xtradb-cluster-5.5
|
python-for-subunit2junitxml/subunit/tests/TestUtil.py
|
Python
|
gpl-2.0
| 2,794
|
[
"VisIt"
] |
d1859917be893690f041103fbe320f6064164b2a23bfb6504d6adda6c6a55cae
|
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.structure.modules.module import Module
class NeuronLayer(Module):
"""Module conceptually representing a layer of units """
# Number of neurons
dim = 0
def __init__(self, dim, name=None):
"""Create a layer with dim number of units."""
Module.__init__(self, dim, dim, name=name)
self.setArgs(dim=dim)
def whichNeuron(self, inputIndex=None, outputIndex=None):
"""Determine which neuron a position in the input/output buffer
corresponds to. """
if inputIndex is not None:
return inputIndex
if outputIndex is not None:
return outputIndex
|
pybrain/pybrain
|
pybrain/structure/modules/neuronlayer.py
|
Python
|
bsd-3-clause
| 691
|
[
"NEURON"
] |
41eb31d74316fa33c2319f5218c771aceb38e78375ed3620cd66f87cba72c652
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from io import StringIO
import pytest
import numpy as np
from numpy.testing import assert_equal
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import (
PDB,
PDB_HOLE,
PDB_small,
PDB_conect,
PDB_conect2TER,
PDB_singleconect,
PDB_chainidnewres,
PDB_sameresid_diffresname,
PDB_helix,
PDB_elements,
)
from MDAnalysis.topology.PDBParser import PDBParser
from MDAnalysis import NoDataError
_PDBPARSER = mda.topology.PDBParser.PDBParser
hybrid36 = [
("A0000", 100000),
("MEGAN", 20929695),
("J0NNY", 15247214),
("DREW6", 6417862),
("ST3V3", 31691119),
("ADA8M", 719798),
("a0000", 43770016),
("megan", 64599711),
("j0nny", 58917230),
("drew6", 50087878),
("st3v3", 75361135),
("ada8m", 44389814),
(" 6", 6),
(" 24", 24),
(" 645", 645),
(" 4951", 4951),
("10267", 10267)
]
@pytest.mark.parametrize('hybrid, integer', hybrid36)
def test_hy36decode(hybrid, integer):
assert mda.topology.PDBParser.hy36decode(5, hybrid) == integer
class PDBBase(ParserBase):
expected_attrs = ['ids', 'names', 'record_types', 'resids',
'resnames', 'altLocs', 'icodes', 'occupancies',
'tempfactors', 'chainIDs']
guessed_attrs = ['types', 'masses']
class TestPDBParser(PDBBase):
"""This one has neither chainids or segids"""
parser = mda.topology.PDBParser.PDBParser
ref_filename = PDB
expected_n_atoms = 47681
expected_n_residues = 11302
expected_n_segments = 1
class TestPDBParserSegids(PDBBase):
"""Has segids"""
parser = mda.topology.PDBParser.PDBParser
ref_filename = PDB_small
expected_n_atoms = 3341
expected_n_residues = 214
expected_n_segments = 1
class TestPDBConect(object):
"""Testing PDB topology parsing (PDB)"""
def test_conect_parser(self):
lines = ("CONECT1233212331",
"CONECT123331233112334",
"CONECT123341233312335",
"CONECT123351233412336",
"CONECT12336123271233012335",
"CONECT12337 7718 84081234012344",
"CONECT1233812339123401234112345")
results = ((12332, [12331]),
(12333, [12331, 12334]),
(12334, [12333, 12335]),
(12335, [12334, 12336]),
(12336, [12327, 12330, 12335]),
(12337, [7718, 8408, 12340, 12344]),
(12338, [12339, 12340, 12341, 12345]))
for line, res in zip(lines, results):
bonds = mda.topology.PDBParser._parse_conect(line)
assert_equal(bonds[0], res[0])
for bond, res_bond in zip(bonds[1], res[1]):
assert_equal(bond, res_bond)
def test_conect_parser_runtime(self):
with pytest.raises(RuntimeError):
mda.topology.PDBParser._parse_conect('CONECT12337 7718 '
'84081234012344123')
def test_conect_topo_parser(self):
"""Check that the parser works as intended,
and that the returned value is a dictionary
"""
with _PDBPARSER(PDB_conect) as p:
top = p.parse()
assert isinstance(top, mda.core.topology.Topology)
def test_conect2ter():
def parse():
with PDBParser(PDB_conect2TER) as p:
struc = p.parse()
return struc
with pytest.warns(UserWarning):
struc = parse()
assert hasattr(struc, 'bonds')
assert len(struc.bonds.values) == 4
def test_single_conect():
def parse():
with PDBParser(PDB_singleconect) as p:
struc = p.parse()
return struc
with pytest.warns(UserWarning):
struc = parse()
assert hasattr(struc, 'bonds')
assert len(struc.bonds.values) == 2
def test_new_chainid_new_res():
# parser must start new residue when chainid starts
u = mda.Universe(PDB_chainidnewres)
assert len(u.residues) == 4
assert_equal(u.residues.resids, [1, 2, 3, 3])
assert len(u.segments) == 4
assert_equal(u.segments.segids, ['A', 'B', 'C', 'D'])
assert len(u.segments[0].atoms) == 5
assert len(u.segments[1].atoms) == 5
assert len(u.segments[2].atoms) == 5
assert len(u.segments[3].atoms) == 7
def test_sameresid_diffresname():
with _PDBPARSER(PDB_sameresid_diffresname) as p:
top = p.parse()
resids = [9, 9]
resnames = ['GLN', 'POPC']
for i, (resid, resname) in enumerate(zip(resids, resnames)):
assert top.resids.values[i] == resid
assert top.resnames.values[i] == resname
def test_PDB_record_types():
u = mda.Universe(PDB_HOLE)
assert u.atoms[0].record_type == 'ATOM'
assert u.atoms[132].record_type == 'HETATM'
assert_equal(u.atoms[10:20].record_types, 'ATOM')
assert_equal(u.atoms[271:].record_types, 'HETATM')
PDB_noresid = """\
REMARK For testing reading of CRYST
REMARK This has MODELs then CRYST entries
CRYST1 80.000 80.017 80.017 90.00 90.00 90.00 P 1 1
MODEL 1
ATOM 1 H2 TIP3 10.000 44.891 14.267 1.00 0.00 TIP3
ATOM 2 OH2 TIP3 67.275 48.893 23.568 1.00 0.00 TIP3
ATOM 3 H1 TIP3 66.641 48.181 23.485 1.00 0.00 TIP3
ATOM 4 H2 TIP3 66.986 49.547 22.931 1.00 0.00 TIP3
ENDMDL
"""
def test_PDB_no_resid():
u = mda.Universe(StringIO(PDB_noresid), format='PDB')
assert len(u.atoms) == 4
assert len(u.residues) == 1
# should have default resid of 1
assert u.residues[0].resid == 1
PDB_hex = """\
REMARK For testing reading of hex atom numbers
REMARK This has MODELs then hex atom numbers entries
CRYST1 80.000 80.017 80.017 90.00 90.00 90.00 P 1 1
MODEL 1
HETATM 1 H 2 L 400 20.168 00.034 40.428
HETATMA0000 H 2 L 400 40.168 50.034 40.428
HETATMA0001 H 2 L 400 30.453 60.495 50.132
HETATMA0002 H 2 L 400 20.576 40.354 60.483
HETATMA0003 H 2 L 400 10.208 30.067 70.045
END
"""
# this causes an error on Win64/Python 3.8 on Azure when loaded
# in as a file instead
PDB_metals = """\
HETATM 1 CU CU A 1 00.000 00.000 00.000 1.00 00.00 Cu
HETATM 2 FE FE A 2 03.000 03.000 03.000 1.00 00.00 Fe
HETATM 3 Ca Ca A 3 03.000 03.000 03.000 1.00 00.00 Ca
HETATM 3 Mg Mg A 3 03.000 03.000 03.000 1.00 00.00 Mg
"""
def test_PDB_hex():
u = mda.Universe(StringIO(PDB_hex), format='PDB')
assert len(u.atoms) == 5
assert u.atoms[0].id == 1
assert u.atoms[1].id == 100000
assert u.atoms[2].id == 100001
assert u.atoms[3].id == 100002
assert u.atoms[4].id == 100003
@pytest.mark.filterwarnings("error:Failed to guess the mass")
def test_PDB_metals():
from MDAnalysis.topology import tables
u = mda.Universe(StringIO(PDB_metals), format='PDB')
assert len(u.atoms) == 4
assert u.atoms[0].mass == pytest.approx(tables.masses["CU"])
assert u.atoms[1].mass == pytest.approx(tables.masses["FE"])
assert u.atoms[2].mass == pytest.approx(tables.masses["CA"])
assert u.atoms[3].mass == pytest.approx(tables.masses["MG"])
def test_PDB_elements():
"""The test checks whether elements attribute are assigned
properly given a PDB file with valid elements record.
"""
u = mda.Universe(PDB_elements, format='PDB')
element_list = np.array(['N', 'C', 'C', 'O', 'C', 'C', 'O', 'N', 'H',
'H', 'H', 'H', 'H', 'H', 'H', 'H', 'Cu', 'Fe',
'Mg', 'Ca', 'S', 'O', 'C', 'C', 'S', 'O', 'C',
'C'], dtype=object)
assert_equal(u.atoms.elements, element_list)
def test_missing_elements_noattribute():
"""Check that:
1) a warning is raised if elements are missing
2) the elements attribute is not set
"""
wmsg = ("Element information is missing, elements attribute will not be "
"populated")
with pytest.warns(UserWarning, match=wmsg):
u = mda.Universe(PDB_small)
with pytest.raises(AttributeError):
_ = u.atoms.elements
PDB_wrong_ele = """\
REMARK For testing warnings of wrong elements
REMARK This file represent invalid elements in the elements column
ATOM 1 N ASN A 1 -8.901 4.127 -0.555 1.00 0.00 N
ATOM 2 CA ASN A 1 -8.608 3.135 -1.618 1.00 0.00
ATOM 3 C ASN A 1 -7.117 2.964 -1.897 1.00 0.00 C
ATOM 4 O ASN A 1 -6.634 1.849 -1.758 1.00 0.00 O
ATOM 5 X ASN A 1 -9.437 3.396 -2.889 1.00 0.00 XX
TER 6
HETATM 7 CU CU A 2 03.000 00.000 00.000 1.00 00.00 CU
HETATM 8 FE FE A 3 00.000 03.000 00.000 1.00 00.00 Fe
HETATM 9 Mg Mg A 4 03.000 03.000 03.000 1.00 00.00 MG
TER 10
"""
def test_wrong_elements_warnings():
"""The test checks whether there are invalid elements in the elements
column which have been parsed and returns an appropriate warning.
"""
with pytest.warns(UserWarning, match='Unknown element XX found'):
u = mda.Universe(StringIO(PDB_wrong_ele), format='PDB')
expected = np.array(['N', '', 'C', 'O', '', 'Cu', 'Fe', 'Mg'],
dtype=object)
assert_equal(u.atoms.elements, expected)
def test_nobonds_error():
"""Issue #2832: PDB without CONECT record should not have a bonds
attribute and raises NoDataError on access"""
u = mda.Universe(PDB_helix)
errmsg = "This Universe does not contain bonds information"
with pytest.raises(NoDataError, match=errmsg):
u.atoms.bonds
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/topology/test_pdb.py
|
Python
|
gpl-2.0
| 10,944
|
[
"MDAnalysis"
] |
7ace0710386345f0464e9ee66c45b21194cf6f8db228f4bb2199004a1e1a8149
|
# BEGIN_COPYRIGHT
#
# Copyright (C) 2009-2013 CRS4.
#
# This file is part of biodoop-blast.
#
# biodoop-blast is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# biodoop-blast is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# biodoop-blast. If not, see <http://www.gnu.org/licenses/>.
#
# END_COPYRIGHT
import pydoop.pipes as pp
class Reducer(pp.Reducer):
def __init__(self, ctx):
        super(Reducer, self).__init__(ctx)
def reduce(self, ctx):
pass
|
crs4/biodoop-blast
|
bl/blast/mr/blastall/reducer.py
|
Python
|
gpl-3.0
| 914
|
[
"BLAST"
] |
df6363839dc192dea9d2d285d15472507c8d2d2ab71b4a40cdbcdf36f49d2f28
|
#Copyright (c) Paul & John Ashby 2014
#Weight Effect
#This game is an RPG designed by my brother (John) and me (Paul).
#We built this to explore Python programming, open-source work on GitHub, and to have some fun.
#
print("\\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\")
print("\\ \\ \\ \\ \\Weight Effect \\ \\ \\ \\ \\")
print("\\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\")
print("\nBy Paul & John Ashby")
main = input("\n\nWelcome, would you like to start a new game or exit?\n(new or exit)\n:>")
#------Imported Modules------#
import random
#------Defined Variables------#
wave = 1
#------Defined Lists------#
#dwarf: [name, str, dex, int, dam, arm, pot, hp, gold]
dwarf = [ 0, 0, 0, 0, 0, 0, 0, 0, 0]
#------Defined Functions------#
def char_inventory():#Prints out the stats of the player.
print("Here is what your character looks like so far."
"\n\nName:", dwarf[0],
"\nStrength:", dwarf[1],
"\nDexterity:", dwarf[2],
"\nIntelligence:", dwarf[3],
"\nDamage:", dwarf[4],
"\nArmor:", dwarf[5],
"\nPotions:", dwarf[6],
"\nHP:", dwarf[7],
"\nGold:", dwarf[8])
def stat_modder(x, y, z, a):
#x is the stat list, y is the stat location,
#z is the new value, a is the command flag
stat = x
if (a == 1):#A 1 value for A mean generate character stats
for i in range(1, 5):
stat[i] = random.randint(5,20)
stat[5] = random.randint(3, 18)
stat[7] = 100 + stat[1]
stat[8] = random.randint(50, 100)
elif(a == 2):#A 2 value for A means generate magic stats
for i in range(3):
stat[i] = random.randint(1, 20)
stat[3] = 1
stat[4] = 1
elif(a == 3):#A 3 value for A means modify a stat
stat[y] = z
elif(a == 4): #A 4 value for A means generate stats for a monster wave.
stat[0] = random.randint((2 * wave), (4 * wave)) #Determines the amount monsters in a wave.
for i in range(1, 5):
stat[i] = random.randint((5 * wave), (15 * wave)) #Generates the monsters stats.
stat[5] = random.randint((3 * wave), (12 * wave)) #Generates monster
stat[6] = random.randint((10 * wave * stat[0]), (25 * wave * stat[0])) #Generates the monsters health.
else:
print('Burn the heretic')
return stat
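#Illustrative usage of the command flags handled above (a sketch based on the
#calls made later in this script; the name "Urist" is just an example value):
# stat_modder(dwarf, 0, "Urist", 3) -> flag 3 sets a single stat (here the name)
# stat_modder(dwarf, 1, 1, 1) -> flag 1 rolls a fresh set of character stats
# stat_modder(monsters, 1, 1, 4) -> flag 4 rolls stats for a monster wave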
def magic(command):
#Stats are as follows, 0=Self Knowledge, 1=Will, 2=Concentration 3=Lore
#4=Corruption
dwarf_magic = [0, 0, 0, 0, 0]
if(command == 'read dark tome'):#Boom, long slippery slope baby...
print("""As you brush the dust from the cover of the dark tome,
a chill runs up your arm. A glance down reveals the title,
'Meditations on the Unseen Worlds'. You slowly open the
book and begin to read...""")
print("""It seems to be a journal written by a scholar detailing his
exploration of these supposed unseen worlds. He goes into great
detail in his methods and you think you could replicate his
work. """)
path = input("""Do you stop reading?
(yes or no):> """)
if(path == 'yes'):
print("Scoffing at the madman's claims, you toss the book to the side.")
else:
print("Drawn on by his compelling claims, you continue to read the book...")
dwarf_magic = stat_modder(dwarf_magic, 0, 0, 2)
print("Following the tome's direction, you determine your mental status...")
stat = (dwarf_magic[1] + (20-dwarf_magic[0]))
if(stat < 15):
print("You think your Will is poor.")
elif(15 < stat < 30):
print("You think your Will is average.")
else:
print("You think your Will is great!")
stat = (dwarf_magic[2] + (20-dwarf_magic[0]))
if(stat < 15):
print("You think your Concentration is poor.")
elif(15 < stat < 30):
print("You think your Concentration is average.")
else:
print("You think your Concentration is great!")
def shop_commands():#Lists all the commands for the shopping loop.
print("\nList of commands:",
"\n\tlist : Shows list of commands.",
"\n\tshop : Prints shop inventory.",
"\n\tinv : Prints your inventory,",
"\n\tbuy : Asks to buy an item from the shop.",
"\n\texit : Exits the shop.")
def shop():
store_inventory = []
store_inventory.append(random.randint(1, 20) * wave) #Generates a random value weapon.
store_inventory.append(random.randint(1, 18) * wave) #Generates a random value armor.
store_inventory.append(random.randint(1, 6) * wave) #Generates a random number of potions.
print("WELCOME TO YE OLD LOCAL SHOPPE")
print("\nOur current inventory consists of the following items:")
print("An axe that can do", store_inventory[0], "damage.")
print("A set of armor that protects against", store_inventory[1], "points of damage.")
print("We also have", store_inventory[2], "health potion(s).")
shop_commands()
sCommand = "nothing yet"
while sCommand != "exit":
sCommand = input("Please enter a command from the list.\n:> ")
if sCommand == "list":
shop_commands()
elif sCommand == "shop":
print("Our current inventory consists of the following items:")
print("An axe that can do", store_inventory[0], "damage.")
print("A set of armor that protects against", store_inventory[1], "points of damage.")
print("We also have", store_inventory[2], "health potion(s).")
elif sCommand == "inv":
print("You have the following in your inventory.",
"\n\nAn axe that can do", dwarf[4], "damage.",
"\nA set of armor that can withstand", dwarf[5], "points of damage.\n",
dwarf[6], "Health potions.\n",
dwarf[8], "Gold.")
elif sCommand == "buy":
print("You can buy the following items:")
print("(1) = Axe",
"\n(2) = Armor",
"\n(3) = Potions")
purchase = input("Buy an item from the shop by entering the number that coorosponds to that item.\n:> ")
if purchase == "1": #buying the axe.
cost = store_inventory[0] * 3 * wave
print("This axe will cost", cost, "gold.")
purchase = input("Do you still want to buy this axe?\n(y/n)\n:> ")
if purchase == "y":
posEight = int(dwarf[8])
if posEight < cost:
print("YOU DON'T HAVE THAT KIND OF MONEY!")
input("No purchase made, press enter to continue.")
else:
del dwarf[4]
dwarf.insert(4, store_inventory[0])
posEight -= cost
del dwarf[8]
dwarf.insert(8, posEight)
char_inventory()
elif purchase == "2": #buying the armor.
cost = store_inventory[1] * 3 * wave
print("This armor will cost", cost, "gold.")
purchase = input("Do you still want to buy this armor?\n(y/n)\n:> ")
if purchase == "y":
posEight = int(dwarf[8])
if posEight < cost:
print("YOU DON'T HAVE THAT KIND OF MONEY!")
input("No purchase made, press enter to continue.")
else:
del dwarf[5]
dwarf.insert(5, store_inventory[1])
posEight -= cost
del dwarf[8]
dwarf.insert(8, posEight)
char_inventory()
elif purchase == "3": #buying some potions.
cost = 25 * wave
print("Each potion will cost", cost, "gold.")
purchase = input("Do you still want to buy some potions?\n(y/n)\n:> ")
if purchase == "y":
numberPotions = int(input("How many potions do you want to buy?\n:> "))
if numberPotions > store_inventory[2]:
print("THERE ARE NOT THAT MANY POTIONS IN THE STORE!")
input("Press enter to go back to the shop menu.")
posEight = dwarf[8]
if posEight < (cost * numberPotions):
print("YOU DON'T HAVE THAT KIND OF MONEY!")
input("No purchase made, press enter to continue.")
else:
dwarf[6] += numberPotions
posEight -= (cost * numberPotions)
dwarf[8] = posEight
char_inventory()
def command():
services = [1, 1, 0, 0] #Index 0 is a bedroll, 1 is the shop, 2 is the tome, and 3 is a beer seller
time_left = 2
print("\n\nThe camp is a low, cramped room off of the kobold tunnels.")
if(services[0] == 1):
print("You see a drab straw bedroll shoved in the corner.")
if(services[1] == 1):
print("You see a battered looking tinker standing near a cart of wares.")
if(services[2] == 1):
print("You see a dark tome lying on a table.")
if(services[3] == 1):
print("You see a beerseller partying by the fire.")
while(time_left > 0):
user_command = input("What would you like to do? (shop, read, sleep, party):> ")
if (user_command == 'shop' and services[1] == 1):
shop()
time_left -= 1
elif (user_command == 'sleep' and services[0] == 1):
print("Weary, you collapse onto the bedroll try to sleep")
time_left -= 1
elif (user_command == 'read' and services[2] == 1):
            magic('read dark tome')
            time_left -= 1
elif (user_command == 'party' and services[3] == 1):
print("Are you insane?")
time_left -= 1
else:
print("What?")
def stance_set(oldstance): #takes an argument from previous function and names it 'oldstance'.
#Looks at the argument list to see which stance the character is in and informs the player.
if oldstance[0] == "Aggressive":
print("\nYour current combat stance is 'Aggressive', you will",
"\ndeal more damage but you will also take more as a result.")
if oldstance[0] == "Balanced":
print("\nYour current combat stance is 'Balanced', you will",
"\nnot take any penalties.")
if oldstance[0] == "Defensive":
print("\nYour current combat stance is 'Defensive', you will",
"\nbe better ready to defend yourself but may find it difficult",
"\nto get a good attack in.")
change = input("\nDo you want to change your stance?\n(y/n)\n:> ")
if change == "y":
print("\nHere are the stances available to you:",
"\n(1) - Aggressive (increased attack, decreased armor)",
"\n(2) - Balanced (no combat penalties)",
"\n(3) - Defensive (increased armor, decreased attack)")
newstance_number = int(input("Enter the number that corresponds to your choice and press enter.\n:> "))
if newstance_number == 1:
newstance = ["Aggressive"]
elif newstance_number == 2:
newstance = ["Balanced"]
elif newstance_number == 3:
newstance = ["Defensive"]
print("\nYou have changed your combat stance to:", newstance[0])
return newstance #returns the new stance
else:
return oldstance #returns the original stance
def initiative(monsters):
monster_initiative = random.randint(1 + monsters[2], 20 + monsters[2])
player_initiative = random.randint(1 + dwarf[2], 20 + dwarf[2])
print("\nRolling initiative!")
print("\nYou rolled a",player_initiative, "and the Kobolds rolled a", monster_initiative)
if monster_initiative >= player_initiative:
print("Looks like the Kobolds are attacking first, brace yourself.")
turn = 2 #Sets round flag so monsters do damage first.
return turn
else:
print("You attack first!")
turn = 1 #Sets round flag so player does damage first.
return turn
def player_attack(player_damage, number_monsters, total_monster_hp, monster_armor, stance):
new_damage = round(player_damage + random.randint(1, 10))
#test print
#print("new_damage1 line 304 (expect 1 - 10 initially)", new_damage)
#input()
monster_deaths = 0
new_damage += round(dwarf[4] - monster_armor) #Damage equals char damage - monster armor.
#test print
#print("new_damage2 line 309", new_damage)
#input()
if stance == "Aggressive": #Aggressive adds the character strength to the damage.
stance_damage = dwarf[1]
elif stance == "Balanced": #Balanced adds the character dexterity/4 to the damage.
stance_damage = round(dwarf[2] / 4)
elif stance == "Defensive": #Defensive adds the difference of the characters str and dex to the total damage.
stance_damage = round(dwarf[1] - dwarf[2])
#test print
#print("stance_damage line 319", stance_damage)
#input()
new_damage += stance_damage #Adds/subtracts stance damage to new_damage.
#test print
#print("new_damage3 line 324", new_damage)
#input()
min_monster_hp = round(total_monster_hp / number_monsters) #Builds benchmark for min damage to kill monster.
#test print
#print("min_monster_hp line 329", min_monster_hp)
#input()
while new_damage >= min_monster_hp and number_monsters > 0:
number_monsters -= 1
monster_deaths += 1 #Used to subtract from total monsters.
#Test print
#print("Number of monsters left? ", number_monsters)
#print("Number of monsters killed?", monster_deaths)
        #input()
new_damage -= min_monster_hp
#Test print
#print("new_damage remaining? (line 339)", new_damage)
#input()
total_monster_hp -= min_monster_hp
#Test print
#print("total_monster_hp remaining? (line 343) ", total_monster_hp)
#input()
if number_monsters <= 0 or new_damage < min_monster_hp:
new_values = [new_damage, monster_deaths, total_monster_hp]
return new_values
def monster_attack(monster_damage, player_armor, player_hp, stance):
monster_damage += (random.randint(1 + wave, 10 + wave))
if stance == "Defensive":
monster_damage -= round(player_armor * 2)
else:
monster_damage -= player_armor
if monster_damage <= 0:
print("\nThe Kobolds attack doing", monster_damage, "damage!")
print("You take no damage from the attack.")
input("\nPress enter to continue.")
return player_hp
else:
player_hp -= monster_damage
print("\nThe Kobolds attack doing", monster_damage, "damage!")
print("You have", player_hp, "health left.")
input("Press enter to continue.")
if player_hp <= 0:
input("You have perished in combat!\n\nPress enter to continue.")
exit()
else:
return player_hp
def combat(current_player_hp):
#monsters: [num, str, dex, int, dam, arm, hp]
monsters = ["0", "0", "0", "0", "0", "0", "0"]
stat_modder(monsters, 1, 1, 4)
num_monsters = monsters[0] #Saving the number of monsters for the stat_adder.
#Test print
#print("monsters (line 379)", monsters)
print("\n\nPrepare yourself, you see", monsters[0], "Kobolds charging at you!")
stance = ["Balanced"] #Sets the initial stance.
player_damage_counter = 0 #Keeps track of how much damage the player does.
new_loot = random.randint(50 * monsters[0] * wave, 100 * monsters[0] * wave)
#Test print
#print("new_loot (line 378)", new_loot)
#input()
while monsters[0] > 0 and dwarf[7] > 0:#loops until all monsters are dead or the player dies.
stance = stance_set(stance) #Allows the player to change their stance before each round of combat.
turn = initiative(monsters) #Rolls for initiative to see who goes first.
check_move = 1 #Flag that notifies loop that player has or has not gone.
if turn == 1:
result = player_attack(player_damage_counter, monsters[0], monsters[6], monsters[5], stance[0])
#test print
#print("result list (line 401)", result)
#input()
player_damage_counter = result[0] #Updates total player damage.
monsters[0] -= result[1] #Updates monsters left.
monsters[6] = result[2] #Updates total monster HP.
print("You have slain", result[1], "Kobolds this round! There are", monsters[0], "left.")
if monsters[0] <= 0:
print("You have slain all the Kobolds!")
print("Congratulations!")
dwarf[8] += new_loot
print("You loot", new_loot, "gold from the slain kobolds.")
input("\nPress enter to continue.")
break
turn = 2
check_move = 0 #lets loop know player has already moved this round.
if turn == 2:
current_player_hp = monster_attack(monsters[4], dwarf[5], current_player_hp, stance)
#test print
#print("current_player_hp line 405:", current_player_hp)
#input()
if check_move == 1:
result = player_attack(player_damage_counter, monsters[0], monsters[6], monsters[5], stance[0])
#test print
#print("checking result (second 'if') (line 427)", result)
#input()
player_damage_counter = result[0] #Updates total player damage.
monsters[0] -= result[1] #Updates monsters left.
monsters[6] = result[2] #Updates total monster HP.
print("You have slain", result[1], "Kobolds this round! There are", monsters[0], "left.")
if monsters[0] <= 0:
print("You have slain all the Kobolds!")
print("Congratulations!")
dwarf[8] += new_loot
print("You loot", new_loot, "gold from the slain kobolds.")
input("\nPress enter to continue.")
break
stat_adder(num_monsters)
return current_player_hp
def potion(current_player_hp):
print("\nYou have", current_player_hp, "health remaining.")
print("You have", dwarf[6], "health potions remaining.")
if dwarf[6] > 0:
print("Do you want to use a health potion to restore your HP?\n(y,n)\n:>")
use = input()
if use == "y":
dwarf[6] -= 1
current_player_hp = dwarf[7]
print("You have regained your health after drinking a potion.")
print("You have", dwarf[7], "health and", dwarf[6], "potions remaining.")
return current_player_hp
else:
print("Good luck then.")
return current_player_hp
else:
input("Unfortunantely you don't have any potions to restore your health.\nPress enter to continue.")
return current_player_hp
def stat_adder(num_monsters):
print("You have gained enough experience improve one of your stats.")
print("\nEnter the number that coorosponds to the stat you want to modify.",
"\n(1) Strength",
"\n(2) Dexterity",
"\n(3) Inteligence")
stat = int(input())
if stat == 1:
str_bonus = random.randint(1 + num_monsters, 6 + num_monsters)
#testprint
print("strength bonus:", str_bonus)
input()
dwarf[1] += str_bonus
dwarf[7] += str_bonus
print("\nYou have increased your strength and HP by", str_bonus,)
input("Press enter to continue.")
elif stat == 2:
dex_bonus = random.randint(1 + num_monsters, 6 + num_monsters)
#testprint
print("Dex bonus:", dex_bonus)
input()
dwarf[2] += dex_bonus
print("\nYou have increased your dexterity by", dex_bonus)
input("Press enter to continue.")
elif stat == 3:
int_bonus = random.randint(1 + num_monsters, 6 + num_monsters)
#testprint
print("Inteligence bonus:", int_bonus)
input()
dwarf[3] += int_bonus
print("\nYou have increased inteligence by", int_bonus)
input("Press enter to continue.")
char_inventory()
#------MAIN LOOP------#
while True:
if main == 'new':
for i in range(5): #This loop controls character creation.
name = input("What is the name of your dwarf?\n:> ")
stat_modder(dwarf, 0, name, 3)
print("Generating stats for your dwarf!")
stat_modder(dwarf, 1, 1, 1)
char_inventory()
reroll = input("Do you want to re-roll your character? (y/n)\n:> ")
if reroll == "n":
break
else:
print("You can re-roll your dwarf", 5 - (i + 1), "more times.")
current_player_hp = dwarf[7]
main = 'end'
spend = input("Do you want to go to a shop before you fight endless waves of monsters?\n(y/n)\n:> ")
if spend == "y":
shop()
print("Prepare for combat!")
current_player_hp = combat(current_player_hp)
current_player_hp = potion(current_player_hp)
wave += 1
command()
|
bedevere/FailHub
|
weighteffect.py
|
Python
|
mit
| 23,719
|
[
"TINKER"
] |
3b68711ab085c96e12eb70c283121c64f02b9cbabb8e862d48e1ddfb04cd17f4
|
# Copyright (C) 2014 Brian Marshall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gettext
gettext.install('sisko')
|
bmars/sisko
|
sisko/__init__.py
|
Python
|
gpl-3.0
| 713
|
[
"Brian"
] |
4c46d25b1ae2b7fec9dbb59911c6965fc6d92ff0de8d8d216579e3325a4d363f
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
function for calculating the convergence of an x, y data set
main api:
test_conv(xs, ys, name, tol)
tries to fit multiple functions to the x, y data
calculates which function fits best
for tol < 0
returns the x value for which y is converged within tol of the assymtotic value
for tol > 0
returns the x_value for which dy(x)/dx < tol for all x >= x_value, conv is true is such a x_value exists
for the best fit a gnuplot line is printed plotting the data, the function and the assymthotic value
"""
import random
import string
import numpy as np
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "June 2014"
def id_generator(size=8, chars=string.ascii_uppercase + string.digits):
"""
Args:
size ():
chars ():
Returns:
"""
return "".join(random.choice(chars) for _ in range(size))
class SplineInputError(Exception):
"""
Error for Spline input
"""
def __init__(self, msg):
"""
Args:
msg (str): Message
"""
self.msg = msg
def get_derivatives(xs, ys, fd=False):
"""
return the derivatives of y(x) at the points x
if scipy is available a spline is generated to calculate the derivatives
if scipy is not available the left and right slopes are calculated, if both exist the average is returned
    setting fd to True always returns the finite difference slopes
"""
try:
if fd:
raise SplineInputError("no spline wanted")
if len(xs) < 4:
er = SplineInputError("too few data points")
raise er
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(xs, ys)
d = spline.derivative(1)(xs)
except (ImportError, SplineInputError):
d = []
m, left, right = 0, 0, 0
for n in range(0, len(xs), 1):
try:
left = (ys[n] - ys[n - 1]) / (xs[n] - xs[n - 1])
m += 1
except IndexError:
pass
try:
right = (ys[n + 1] - ys[n]) / (xs[n + 1] - xs[n])
m += 1
except IndexError:
pass
            d.append((left + right) / m)  # average of the available slopes, per the docstring
return d
"""
functions used in the fitting procedure, with initial guesses
"""
def print_and_raise_error(xs, ys, name):
"""
Args:
xs ():
ys ():
name ():
Returns:
"""
print("Index error in", name)
print("ys: ", ys)
print("xs: ", xs)
raise RuntimeError
def reciprocal(x, a, b, n):
"""
reciprocal function to the power n to fit convergence data
"""
if n < 1:
n = 1
elif n > 5:
n = 5
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v ** n)
y = np.array(y_l)
else:
y = a + b / x ** n
return y
def p0_reciprocal(xs, ys):
"""
predictor for first guess for reciprocal
"""
a0 = ys[len(ys) - 1]
b0 = ys[0] * xs[0] - a0 * xs[0]
return [a0, b0, 1]
def exponential(x, a, b, n):
"""
exponential function base n to fit convergence data
"""
if n < 1.000001:
n = 1.000001
elif n > 1.2:
n = 1.2
if b < -10:
b = -10
elif b > 10:
b = 10
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b * n ** -x_v)
y = np.array(y_l)
else:
y = a + b * n ** -x
return y
def p0_exponential(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
n0 = 1.005
b0 = (n0 ** -xs[-1] - n0 ** -xs[1]) / (ys[-1] - ys[1])
a0 = ys[1] - b0 * n0 ** -xs[1]
# a0 = ys[-1]
# b0 = (ys[0] - a0) / n0 ** xs[0]
return [a0, b0, n0]
def single_reciprocal(x, a, b, c):
"""
reciprocal function to fit convergence data
"""
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / (x_v - c))
y = np.array(y_l)
else:
y = a + b / (x - c)
return y
def p0_single_reciprocal(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
c = 1
b = (1 / (xs[-1] - c) - 1 / (xs[1] - c)) / (ys[-1] - ys[1])
a = ys[1] - b / (xs[1] - c)
return [a, b, c]
def simple_reciprocal(x, a, b):
"""
reciprocal function to fit convergence data
"""
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v)
y = np.array(y_l)
else:
y = a + b / x
return y
def p0_simple_reciprocal(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
# b = (ys[-1] - ys[1]) / (1/xs[-1] - 1/xs[1])
# a = ys[1] - b / xs[1]
b = (ys[-1] - ys[-2]) / (1 / (xs[-1]) - 1 / (xs[-2]))
a = ys[-2] - b / (xs[-2])
return [a, b]
def simple_2reciprocal(x, a, b):
"""
reciprocal function to fit convergence data
"""
c = 2
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v ** c)
y = np.array(y_l)
else:
y = a + b / x ** c
return y
def p0_simple_2reciprocal(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
c = 2
b = (ys[-1] - ys[1]) / (1 / xs[-1] ** c - 1 / xs[1] ** c)
a = ys[1] - b / xs[1] ** c
return [a, b]
def simple_4reciprocal(x, a, b):
"""
reciprocal function to fit convergence data
"""
c = 4
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v ** c)
y = np.array(y_l)
else:
y = a + b / x ** c
return y
def p0_simple_4reciprocal(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
c = 4
b = (ys[-1] - ys[1]) / (1 / xs[-1] ** c - 1 / xs[1] ** c)
a = ys[1] - b / xs[1] ** c
return [a, b]
def simple_5reciprocal(x, a, b):
"""
reciprocal function to fit convergence data
"""
c = 0.5
if isinstance(x, list):
y_l = []
for x_v in x:
y_l.append(a + b / x_v ** c)
y = np.array(y_l)
else:
y = a + b / x ** c
return y
def p0_simple_5reciprocal(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
c = 0.5
b = (ys[-1] - ys[1]) / (1 / xs[-1] ** c - 1 / xs[1] ** c)
a = ys[1] - b / xs[1] ** c
return [a, b]
def extrapolate_simple_reciprocal(xs, ys):
"""
Args:
xs ():
ys ():
Returns:
"""
b = (ys[-2] - ys[-1]) / (1 / (xs[-2]) - 1 / (xs[-1]))
a = ys[-1] - b / (xs[-1])
return [a, b]
def extrapolate_reciprocal(xs, ys, n, noise):
"""
return the parameters such that a + b / x^n hits the last two data points
"""
if len(xs) > 4 and noise:
y1 = (ys[-3] + ys[-4]) / 2
y2 = (ys[-1] + ys[-2]) / 2
x1 = (xs[-3] + xs[-4]) / 2
x2 = (xs[-1] + xs[-2]) / 2
try:
b = (y1 - y2) / (1 / x1 ** n - 1 / x2 ** n)
a = y2 - b / x2 ** n
except IndexError:
print_and_raise_error(xs, ys, "extrapolate_reciprocal")
else:
try:
b = (ys[-2] - ys[-1]) / (1 / (xs[-2]) ** n - 1 / (xs[-1]) ** n)
a = ys[-1] - b / (xs[-1]) ** n
except IndexError:
print_and_raise_error(xs, ys, "extrapolate_reciprocal")
return [a, b, n]
def measure(function, xs, ys, popt, weights):
"""
measure the quality of a fit
"""
m = 0
n = 0
for x in xs:
try:
if len(popt) == 2:
m += (ys[n] - function(x, popt[0], popt[1])) ** 2 * weights[n]
elif len(popt) == 3:
m += (ys[n] - function(x, popt[0], popt[1], popt[2])) ** 2 * weights[n]
else:
raise NotImplementedError
n += 1
except IndexError:
raise RuntimeError("y does not exist for x = ", x, " this should not happen")
return m
def get_weights(xs, ys, mode=2):
"""
Args:
xs ():
ys ():
mode ():
Returns:
"""
ds = get_derivatives(xs, ys, fd=True)
if mode == 1:
mind = np.inf
for d in ds:
mind = min(abs(d), mind)
weights = []
for d in ds:
weights.append(abs(mind / d))
if mode == 2:
maxxs = max(xs) ** 2
weights = []
for x in xs:
weights.append(x ** 2 / maxxs)
else:
weights = [1] * len(xs)
return weights
def multi_curve_fit(xs, ys, verbose):
"""
fit multiple functions to the x, y data, return the best fit
"""
# functions = {exponential: p0_exponential, reciprocal: p0_reciprocal, single_reciprocal: p0_single_reciprocal}
functions = {
exponential: p0_exponential,
reciprocal: p0_reciprocal,
# single_reciprocal: p0_single_reciprocal,
simple_reciprocal: p0_simple_reciprocal,
simple_2reciprocal: p0_simple_2reciprocal,
simple_4reciprocal: p0_simple_4reciprocal,
simple_5reciprocal: p0_simple_5reciprocal,
}
from scipy.optimize import curve_fit
fit_results = {}
best = ["", np.inf]
for k, v in functions.items():
try:
weights = get_weights(xs, ys)
popt, pcov = curve_fit(
k,
xs,
ys,
v(xs, ys),
maxfev=8000,
sigma=weights,
)
pcov = []
m = measure(k, xs, ys, popt, weights)
fit_results.update({k: {"measure": m, "popt": popt, "pcov": pcov}})
for f, v in fit_results.items():
if v["measure"] <= best[1]:
best = f, v["measure"]
if verbose:
print(str(k), m)
except RuntimeError:
print("no fit found for ", k)
return fit_results[best[0]]["popt"], fit_results[best[0]]["pcov"], best
def multi_reciprocal_extra(xs, ys, noise=False):
"""
    For a series of powers n, calculate the parameters for which the last two points lie on the curve.
    With these parameters, measure how well the other data points fit.
    Return the best fit.
"""
ns = np.linspace(0.5, 6.0, num=56)
best = ["", np.inf]
fit_results = {}
weights = get_weights(xs, ys)
for n in ns:
popt = extrapolate_reciprocal(xs, ys, n, noise)
m = measure(reciprocal, xs, ys, popt, weights)
pcov = []
fit_results.update({n: {"measure": m, "popt": popt, "pcov": pcov}})
for n, v in fit_results.items():
if v["measure"] <= best[1]:
best = reciprocal, v["measure"], n
return fit_results[best[2]]["popt"], fit_results[best[2]]["pcov"], best
def print_plot_line(function, popt, xs, ys, name, tol=0.05, extra=""):
"""
print the gnuplot command line to plot the x, y data with the fitted function using the popt parameters
"""
idp = id_generator()
with open("convdat." + str(idp), mode="w") as f:
for n in range(0, len(ys), 1):
f.write(str(xs[n]) + " " + str(ys[n]) + "\n")
tol = abs(tol)
line = "plot 'convdat.%s' pointsize 4 lt 0, " % idp
line += f"{popt[0]} lt 3, {popt[0] - tol} lt 4, {popt[0] + tol} lt 4, "
if function is exponential:
line += "{} + {} * {} ** -x".format(
popt[0],
popt[1],
min(max(1.00001, popt[2]), 1.2),
)
elif function is reciprocal:
line += f"{popt[0]} + {popt[1]} / x**{min(max(0.5, popt[2]), 6)}"
elif function is single_reciprocal:
line += f"{popt[0]} + {popt[1]} / (x - {popt[2]})"
elif function is simple_reciprocal:
line += f"{popt[0]} + {popt[1]} / x"
elif function is simple_2reciprocal:
line += f"{popt[0]} + {popt[1]} / x**2"
elif function is simple_4reciprocal:
line += f"{popt[0]} + {popt[1]} / x**4"
elif function is simple_5reciprocal:
line += f"{popt[0]} + {popt[1]} / x**0.5"
else:
print(function, " no plot ")
with open("plot-fits", mode="a") as f:
f.write('set title "' + name + " - " + extra + '"\n')
f.write("set output '" + name + "-" + idp + ".gif'" + "\n")
f.write("set yrange [" + str(popt[0] - 5 * tol) + ":" + str(popt[0] + 5 * tol) + "]\n")
f.write(line + "\n")
f.write("pause -1 \n")
def determine_convergence(xs, ys, name, tol=0.0001, extra="", verbose=False, mode="extra", plots=True):
"""
    Test convergence: return the x_value at which dy(x)/dx < tol for all x >= x_value (for tol > 0),
    or at which y is within |tol| of the extrapolated asymptotic value (for tol < 0);
    conv is True if such an x_value exists.
"""
if len(xs) != len(ys):
raise RuntimeError("the range of x and y are not equal")
conv = False
x_value = float("inf")
y_value = None
n_value = None
popt = [None, None, None]
if len(xs) > 2:
ds = get_derivatives(xs[0 : len(ys)], ys)
try:
if None not in ys:
if mode == "fit":
popt, pcov, func = multi_curve_fit(xs, ys, verbose)
elif mode == "extra":
res = multi_reciprocal_extra(xs, ys)
if res is not None:
popt, pcov, func = multi_reciprocal_extra(xs, ys)
else:
print(xs, ys)
popt, pcov = None, None
elif mode == "extra_noise":
popt, pcov, func = multi_reciprocal_extra(xs, ys, noise=True)
else:
raise NotImplementedError("unknown mode for test conv")
if func[1] > abs(tol):
print(
"warning function ",
func[0],
" as the best fit but not a good fit: ",
func[1],
)
# todo print this to file via a method in helper, as dict
if plots:
with open(name + ".fitdat", mode="a") as f:
f.write("{")
f.write('"popt": ' + str(popt) + ", ")
f.write('"pcov": ' + str(pcov) + ", ")
f.write('"data": [')
for n in range(0, len(ys), 1):
f.write("[" + str(xs[n]) + " " + str(ys[n]) + "]")
f.write("]}\n")
print_plot_line(func[0], popt, xs, ys, name, tol=tol, extra=extra)
except ImportError:
popt, pcov = None, None
for n in range(0, len(ds), 1):
if verbose:
print(n, ys[n])
print(ys)
if tol < 0:
if popt[0] is not None:
test = abs(popt[0] - ys[n])
else:
test = float("inf")
else:
test = abs(ds[n])
if verbose:
print(test)
if test < abs(tol):
if verbose:
print("converged")
conv = True
if xs[n] < x_value:
x_value = xs[n]
y_value = ys[n]
n_value = n
else:
if verbose:
print("not converged")
conv = False
x_value = float("inf")
if n_value is None:
return [conv, x_value, y_value, n_value, popt[0], None]
return [conv, x_value, y_value, n_value, popt[0], ds[n_value]]
return [conv, x_value, y_value, n_value, popt[0], None]
|
vorwerkc/pymatgen
|
pymatgen/util/convergence.py
|
Python
|
mit
| 15,849
|
[
"pymatgen"
] |
9a3fb6500171081914a1878bf6a0f5960989628ca4e26e83e7cc4518cc30803b
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudwatchevent_rule
short_description: Manage CloudWatch Event rules and targets
description:
- This module creates and manages CloudWatch event rules and targets.
version_added: "2.2"
extends_documentation_fragment:
- aws
author: "Jim Dalton (@jsdalton) <jim.dalton@gmail.com>"
requirements:
- python >= 2.6
- boto3
notes:
- A rule must contain at least an I(event_pattern) or I(schedule_expression). A
rule can have both an I(event_pattern) and a I(schedule_expression), in which
case the rule will trigger on matching events as well as on a schedule.
- When specifying targets, I(input) and I(input_path) are mutually-exclusive
and optional parameters.
options:
name:
description:
- The name of the rule you are creating, updating or deleting. No spaces
or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+))
required: true
schedule_expression:
description:
- A cron or rate expression that defines the schedule the rule will
trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes))
required: false
event_pattern:
description:
- A string pattern (in valid JSON format) that is used to match against
incoming events to determine if the rule should be triggered
required: false
state:
description:
- Whether the rule is present (and enabled), disabled, or absent
choices: ["present", "disabled", "absent"]
default: present
required: false
description:
description:
- A description of the rule
required: false
role_arn:
description:
- The Amazon Resource Name (ARN) of the IAM role associated with the rule
required: false
targets:
description:
- "A dictionary array of targets to add to or update for the rule, in the
form C({ id: [string], arn: [string], role_arn: [string], input: [valid JSON string],
input_path: [valid JSONPath string], ecs_parameters: {task_definition_arn: [string], task_count: [int]}}).
I(id) [required] is the unique target assignment ID. I(arn) (required)
is the Amazon Resource Name associated with the target. I(role_arn) (optional) is The Amazon Resource Name
of the IAM role to be used for this target when the rule is triggered. I(input)
(optional) is a JSON object that will override the event data when
passed to the target. I(input_path) (optional) is a JSONPath string
(e.g. C($.detail)) that specifies the part of the event data to be
passed to the target. If neither I(input) nor I(input_path) is
specified, then the entire event is passed to the target in JSON form.
I(task_definition_arn) [optional] is ecs task definition arn.
I(task_count) [optional] is ecs task count."
required: false
'''
EXAMPLES = '''
- cloudwatchevent_rule:
name: MyCronTask
schedule_expression: "cron(0 20 * * ? *)"
description: Run my scheduled task
targets:
- id: MyTargetId
arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
- cloudwatchevent_rule:
name: MyDisabledCronTask
schedule_expression: "cron(5 minutes)"
description: Run my disabled scheduled task
state: disabled
targets:
- id: MyOtherTargetId
arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
input: '{"foo": "bar"}'
- cloudwatchevent_rule:
name: MyCronTask
state: absent
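# Illustrative only (not part of the original examples): a rule targeting an ECS
# cluster, showing role_arn, input_path and ecs_parameters as described in the
# options above.
- cloudwatchevent_rule:
    name: MyEcsScheduledTask
    schedule_expression: "rate(10 minutes)"
    description: Run an ECS task on a schedule
    targets:
      - id: MyEcsTargetId
        arn: arn:aws:ecs:us-east-1:123456789012:cluster/default
        role_arn: arn:aws:iam::123456789012:role/ecsEventsRole
        input_path: "$.detail"
        ecs_parameters:
          task_definition_arn: arn:aws:ecs:us-east-1:123456789012:task-definition/MyTask:1
          task_count: 1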
'''
RETURN = '''
rule:
description: CloudWatch Event rule data
returned: success
type: dict
sample: "{ 'arn': 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask', 'description': 'Run my scheduled task', 'name': 'MyCronTask', 'schedule_expression': 'cron(0 20 * * ? *)', 'state': 'ENABLED' }"
targets:
description: CloudWatch Event target(s) assigned to the rule
returned: success
type: list
sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
''' # NOQA
try:
import boto3.exception
import botocore.exceptions
except ImportError:
# module_utils.ec2.HAS_BOTO3 will do the right thing
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
class CloudWatchEventRule(object):
def __init__(self, module, name, client, schedule_expression=None,
event_pattern=None, description=None, role_arn=None):
self.name = name
self.client = client
self.changed = False
self.schedule_expression = schedule_expression
self.event_pattern = event_pattern
self.description = description
self.role_arn = role_arn
def describe(self):
"""Returns the existing details of the rule in AWS"""
try:
rule_info = self.client.describe_rule(Name=self.name)
except botocore.exceptions.ClientError as e:
error_code = e.response.get('Error', {}).get('Code')
if error_code == 'ResourceNotFoundException':
return {}
raise
return self._snakify(rule_info)
def put(self, enabled=True):
"""Creates or updates the rule in AWS"""
request = {
'Name': self.name,
'State': "ENABLED" if enabled else "DISABLED",
}
if self.schedule_expression:
request['ScheduleExpression'] = self.schedule_expression
if self.event_pattern:
request['EventPattern'] = self.event_pattern
if self.description:
request['Description'] = self.description
if self.role_arn:
request['RoleArn'] = self.role_arn
response = self.client.put_rule(**request)
self.changed = True
return response
def delete(self):
"""Deletes the rule in AWS"""
self.remove_all_targets()
response = self.client.delete_rule(Name=self.name)
self.changed = True
return response
def enable(self):
"""Enables the rule in AWS"""
response = self.client.enable_rule(Name=self.name)
self.changed = True
return response
def disable(self):
"""Disables the rule in AWS"""
response = self.client.disable_rule(Name=self.name)
self.changed = True
return response
def list_targets(self):
"""Lists the existing targets for the rule in AWS"""
try:
targets = self.client.list_targets_by_rule(Rule=self.name)
except botocore.exceptions.ClientError as e:
error_code = e.response.get('Error', {}).get('Code')
if error_code == 'ResourceNotFoundException':
return []
raise
return self._snakify(targets)['targets']
def put_targets(self, targets):
"""Creates or updates the provided targets on the rule in AWS"""
if not targets:
return
request = {
'Rule': self.name,
'Targets': self._targets_request(targets),
}
response = self.client.put_targets(**request)
self.changed = True
return response
def remove_targets(self, target_ids):
"""Removes the provided targets from the rule in AWS"""
if not target_ids:
return
request = {
'Rule': self.name,
'Ids': target_ids
}
response = self.client.remove_targets(**request)
self.changed = True
return response
def remove_all_targets(self):
"""Removes all targets on rule"""
targets = self.list_targets()
return self.remove_targets([t['id'] for t in targets])
def _targets_request(self, targets):
"""Formats each target for the request"""
targets_request = []
for target in targets:
target_request = {
'Id': target['id'],
'Arn': target['arn']
}
if 'input' in target:
target_request['Input'] = target['input']
if 'input_path' in target:
target_request['InputPath'] = target['input_path']
if 'role_arn' in target:
target_request['RoleArn'] = target['role_arn']
if 'ecs_parameters' in target:
target_request['EcsParameters'] = {}
ecs_parameters = target['ecs_parameters']
if 'task_definition_arn' in target['ecs_parameters']:
target_request['EcsParameters']['TaskDefinitionArn'] = ecs_parameters['task_definition_arn']
if 'task_count' in target['ecs_parameters']:
target_request['EcsParameters']['TaskCount'] = ecs_parameters['task_count']
targets_request.append(target_request)
return targets_request
def _snakify(self, dict):
"""Converts cammel case to snake case"""
return camel_dict_to_snake_dict(dict)
class CloudWatchEventRuleManager(object):
RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn']
def __init__(self, rule, targets):
self.rule = rule
self.targets = targets
def ensure_present(self, enabled=True):
"""Ensures the rule and targets are present and synced"""
rule_description = self.rule.describe()
if rule_description:
# Rule exists so update rule, targets and state
self._sync_rule(enabled)
self._sync_targets()
self._sync_state(enabled)
else:
# Rule does not exist, so create new rule and targets
self._create(enabled)
def ensure_disabled(self):
"""Ensures the rule and targets are present, but disabled, and synced"""
self.ensure_present(enabled=False)
def ensure_absent(self):
"""Ensures the rule and targets are absent"""
rule_description = self.rule.describe()
if not rule_description:
# Rule doesn't exist so don't need to delete
return
self.rule.delete()
def fetch_aws_state(self):
"""Retrieves rule and target state from AWS"""
aws_state = {
'rule': {},
'targets': [],
'changed': self.rule.changed
}
rule_description = self.rule.describe()
if not rule_description:
return aws_state
# Don't need to include response metadata noise in response
del rule_description['response_metadata']
aws_state['rule'] = rule_description
aws_state['targets'].extend(self.rule.list_targets())
return aws_state
def _sync_rule(self, enabled=True):
"""Syncs local rule state with AWS"""
if not self._rule_matches_aws():
self.rule.put(enabled)
def _sync_targets(self):
"""Syncs local targets with AWS"""
# Identify and remove extraneous targets on AWS
target_ids_to_remove = self._remote_target_ids_to_remove()
if target_ids_to_remove:
self.rule.remove_targets(target_ids_to_remove)
# Identify targets that need to be added or updated on AWS
targets_to_put = self._targets_to_put()
if targets_to_put:
self.rule.put_targets(targets_to_put)
def _sync_state(self, enabled=True):
"""Syncs local rule state with AWS"""
remote_state = self._remote_state()
if enabled and remote_state != 'ENABLED':
self.rule.enable()
elif not enabled and remote_state != 'DISABLED':
self.rule.disable()
def _create(self, enabled=True):
"""Creates rule and targets on AWS"""
self.rule.put(enabled)
self.rule.put_targets(self.targets)
def _rule_matches_aws(self):
"""Checks if the local rule data matches AWS"""
aws_rule_data = self.rule.describe()
# The rule matches AWS only if all rule data fields are equal
# to their corresponding local value defined in the task
return all([
getattr(self.rule, field) == aws_rule_data.get(field, None)
for field in self.RULE_FIELDS
])
def _targets_to_put(self):
"""Returns a list of targets that need to be updated or added remotely"""
remote_targets = self.rule.list_targets()
return [t for t in self.targets if t not in remote_targets]
def _remote_target_ids_to_remove(self):
"""Returns a list of targets that need to be removed remotely"""
target_ids = [t['id'] for t in self.targets]
remote_targets = self.rule.list_targets()
return [
rt['id'] for rt in remote_targets if rt['id'] not in target_ids
]
def _remote_state(self):
"""Returns the remote state from AWS"""
description = self.rule.describe()
if not description:
return
return description['state']
def get_cloudwatchevents_client(module):
"""Returns a boto3 client for accessing CloudWatch Events"""
try:
region, ec2_url, aws_conn_kwargs = get_aws_connection_info(module,
boto3=True)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in \
EC2_REGION or AWS_REGION environment variables \
or in boto configuration file")
return boto3_conn(module, conn_type='client',
resource='events',
region=region, endpoint=ec2_url,
**aws_conn_kwargs)
except boto3.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
schedule_expression = dict(),
event_pattern = dict(),
state = dict(choices=['present', 'disabled', 'absent'],
default='present'),
description = dict(),
role_arn = dict(),
targets = dict(type='list', default=[]),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
rule_data = dict(
[(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS]
)
targets = module.params.get('targets')
state = module.params.get('state')
cwe_rule = CloudWatchEventRule(module,
client=get_cloudwatchevents_client(module),
**rule_data)
cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets)
if state == 'present':
cwe_rule_manager.ensure_present()
elif state == 'disabled':
cwe_rule_manager.ensure_disabled()
elif state == 'absent':
cwe_rule_manager.ensure_absent()
else:
module.fail_json(msg="Invalid state '{0}' provided".format(state))
module.exit_json(**cwe_rule_manager.fetch_aws_state())
if __name__ == '__main__':
main()
|
Nicop06/ansible
|
lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
|
Python
|
gpl-3.0
| 15,715
|
[
"Dalton"
] |
899ee25687911805b4c4c689e4f3eccd5a816f24dcc1c8051e50901f07ec5ea8
|
import numpy as np
# import FitsUtils
import FittingUtilities
import HelperFunctions
import matplotlib.pyplot as plt
import sys
import os
from astropy import units
from astropy.io import fits, ascii
import DataStructures
from scipy.interpolate import InterpolatedUnivariateSpline as interp
import MakeModel
import HelperFunctions
from collections import Counter
from sklearn.gaussian_process import GaussianProcess
import warnings
def SmoothData(order, windowsize=91, smoothorder=5, lowreject=3, highreject=3, numiters=10, expand=0, normalize=True):
denoised = HelperFunctions.Denoise(order.copy())
denoised.y = FittingUtilities.Iterative_SV(denoised.y, windowsize, smoothorder, lowreject=lowreject,
highreject=highreject, numiters=numiters, expand=expand)
if normalize:
denoised.y /= denoised.y.max()
return denoised
def roundodd(num):
rounded = round(num)
if rounded % 2 != 0:
return rounded
else:
if rounded > num:
return rounded - 1
else:
return rounded + 1
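# Quick sanity check of roundodd (hand-computed; Python 2 round() returns a
# float): roundodd(6.3) -> 7.0, roundodd(5.8) -> 5.0, roundodd(7.0) -> 7.0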
def GPSmooth(data, low=0.1, high=10, debug=False):
"""
This will smooth the data using Gaussian processes. It will find the best
smoothing parameter via cross-validation to be between the low and high.
The low and high keywords are reasonable bounds for A and B stars with
vsini > 100 km/s.
"""
smoothed = data.copy()
# First, find outliers by doing a guess smooth
smoothed = SmoothData(data, normalize=False)
temp = smoothed.copy()
temp.y = data.y / smoothed.y
temp.cont = FittingUtilities.Continuum(temp.x, temp.y, lowreject=2, highreject=2, fitorder=3)
outliers = HelperFunctions.FindOutliers(temp, numsiglow=3, expand=5)
if len(outliers) > 0:
data.y[outliers] = smoothed.y[outliers]
gp = GaussianProcess(corr='squared_exponential',
theta0=np.sqrt(low * high),
thetaL=low,
thetaU=high,
normalize=False,
nugget=(data.err / data.y) ** 2,
random_start=1)
try:
gp.fit(data.x[:, None], data.y)
except ValueError:
#On some orders with large telluric residuals, this will fail.
# Just fall back to the old smoothing method in that case.
return SmoothData(data), 91
if debug:
print "\tSmoothing parameter theta = ", gp.theta_
smoothed.y, smoothed.err = gp.predict(data.x[:, None], eval_MSE=True)
return smoothed, gp.theta_[0][0]
if __name__ == "__main__":
fileList = []
plot = False
vsini_file = "%s/School/Research/Useful_Datafiles/Vsini.csv" % (os.environ["HOME"])
for arg in sys.argv[1:]:
if "-p" in arg:
plot = True
elif "-vsini" in arg:
vsini_file = arg.split("=")[-1]
else:
fileList.append(arg)
#Read in the vsini table
vsini_data = ascii.read(vsini_file)[10:]
if len(fileList) == 0:
fileList = [f for f in os.listdir("./") if f.endswith("telluric_corrected.fits")]
for fname in fileList:
orders = HelperFunctions.ReadFits(fname, extensions=True, x="wavelength", y="flux", cont="continuum",
errors="error")
#Find the vsini of this star
header = fits.getheader(fname)
starname = header["object"].split()[0].replace("_", " ")
found = False
for data in vsini_data:
if data[0] == starname:
vsini = float(data[1])
found = True
if not found:
outfile = open("Warnings.log", "a")
outfile.write("Cannot find %s in the vsini data: %s\n" % (starname, vsini_file))
outfile.close()
warnings.warn("Cannot find %s in the vsini data: %s" % (starname, vsini_file))
print starname, vsini
#Begin looping over the orders
column_list = []
header_list = []
for i, order in enumerate(orders):
print "Smoothing order %i/%i" % (i + 1, len(orders))
#Fix errors
order.err[order.err > 1e8] = np.sqrt(order.y[order.err > 1e8])
#Linearize
xgrid = np.linspace(order.x[0], order.x[-1], order.x.size)
order = FittingUtilities.RebinData(order, xgrid)
dx = order.x[1] - order.x[0]
smooth_factor = 0.8
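            #Window size in pixels (explanatory note, not in the original):
            #vsini/c is the fractional rotational broadening, multiplying by the
            #mean wavelength gives the width in the same units as x, and dividing
            #by the pixel scale dx converts it to pixels; the result is scaled by
            #smooth_factor and forced to be odd for the smoothing filter.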
theta = roundodd(vsini / 3e5 * order.x.mean() / dx * smooth_factor)
denoised = SmoothData(order,
windowsize=theta,
smoothorder=3,
lowreject=3,
highreject=3,
expand=10,
numiters=10)
#denoised, theta = GPSmooth(order.copy())
#denoised, theta = CrossValidation(order.copy(), 5, 2, 2, 10)
#denoised, theta = OptimalSmooth(order.copy())
#denoised.y *= order.cont/order.cont.mean()
print "Window size = %.4f nm" % theta
column = {"wavelength": denoised.x,
"flux": order.y / denoised.y,
"continuum": denoised.cont,
"error": denoised.err}
header_list.append((("Smoother", theta, "Smoothing Parameter"),))
column_list.append(column)
if plot:
plt.figure(1)
plt.plot(order.x, order.y / order.y.mean())
plt.plot(denoised.x, denoised.y / denoised.y.mean())
plt.title(starname)
plt.figure(2)
plt.plot(order.x, order.y / denoised.y)
plt.title(starname)
#plt.plot(order.x, (order.y-denoised.y)/np.median(order.y))
#plt.show()
if plot:
plt.show()
outfilename = "%s_smoothed.fits" % (fname.split(".fits")[0])
print "Outputting to %s" % outfilename
HelperFunctions.OutputFitsFileExtensions(column_list, fname, outfilename, mode='new', headers_info=header_list)
|
kgullikson88/HET-Scripts
|
Smooth.py
|
Python
|
gpl-3.0
| 6,217
|
[
"Gaussian"
] |
9bca67d3cef137aec3176935bc1efb3802bd09bcd0d14970e99a76d6ccb00bf0
|
#!/usr/bin/env python
##########################################################################
#
# Copyright (C) 2015-2017 Sam Westreich
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation;
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##########################################################################
#
# DIAMOND_analysis_counter.py
# Created 8/16/2016, this version created 1/10/2017
# Sam Westreich, stwestreich@ucdavis.edu, github.com/transcript
#
# This program parses through the results file from a DIAMOND annotation run
# (in BLAST m8 format) to get the results into something more compressed
# and readable.
#
# Usage:
#
# -I infile specifies the infile (a DIAMOND results file
# in m8 format)
# -D database specifies a reference database to search against
# for results
# -O organism returns organism results
# -F function returns functional results
# -SO specific org creates a separate outfile for results that hit
# a specific organism
#
##########################################################################
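# Example invocation (illustrative; the file names below are hypothetical):
#
#   python DIAMOND_analysis_counter.py -I sample.annotated.m8 -D RefSeq_db.fasta -O
#
# counts the DIAMOND hits per organism; use -F instead of -O for per-function
# counts.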
# imports
import operator, sys, time, gzip, re
# String searching function:
def string_find(usage_term):
for idx, elem in enumerate(sys.argv):
this_elem = elem
next_elem = sys.argv[(idx + 1) % len(sys.argv)]
if elem == usage_term:
return next_elem
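# e.g. with sys.argv containing [..., "-I", "results.m8", ...], string_find("-I")
# returns "results.m8" (the element immediately following the flag).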
t0 = time.time()
# checking for an option (organism or function) to be specified
if "-O" not in sys.argv:
if "-F" not in sys.argv:
sys.exit("WARNING: need to specify either organism results (with -O flag in command) or functional results (with -F flag in command).")
# loading starting file
if "-I" in sys.argv:
infile_name = string_find("-I")
else:
sys.exit ("WARNING: infile must be specified using '-I' flag.")
# checking to make sure database is specified
if "-D" in sys.argv:
db_name = string_find("-D")
else:
sys.exit( "No database file indicated; skipping database search step.")
infile = open (infile_name, "r")
# setting up databases
RefSeq_hit_count_db = {}
unique_seq_db = {}
line_counter = 0
# reading through the infile - the DIAMOND results m8 format
print ("\nNow reading through the m8 results infile.")
for line in infile:
line_counter += 1
splitline = line.split("\t")
if line_counter % 1000000 == 0:
t99 = time.time()
print (str(line_counter)[:-6] + "M lines processed so far in " + str(t99-t0) + " seconds.")
unique_seq_db[splitline[0]] = 1
try:
RefSeq_hit_count_db[splitline[1]] += 1
except KeyError:
RefSeq_hit_count_db[splitline[1]] = 1
continue
t1 = time.time()
print ("\nAnalysis of " + infile_name + " complete.")
print ("Number of total lines: " + str(line_counter))
print ("Number of unique sequences: " + str(len(unique_seq_db)))
print ("Time elapsed: " + str(t1-t0) + " seconds.")
infile.close()
# time to search for these in the reference database
db = open (db_name, "r")
print ("\nStarting database analysis now.")
t2 = time.time()
# optional outfile of specific organism results
if "-SO" in sys.argv:
target_org = string_find("-SO")
db_SO_dictionary = {}
# building a dictionary of the reference database
if "-F" in sys.argv:
db_func_dictionary = {}
if "-O" in sys.argv:
db_org_dictionary = {}
db_line_counter = 0
db_error_counter = 0
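# A RefSeq-style FASTA header is assumed to look like (hypothetical entry):
# >WP_012345678.1 DNA polymerase III subunit beta [Escherichia coli]
# db_id is the accession, db_entry the functional description, and db_org the
# organism name taken from the last square-bracketed field.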
for line in db:
if line.startswith(">") == True:
db_line_counter += 1
splitline = line.split("[",1)
# ID, the hit returned in DIAMOND results
db_id = str(splitline[0].split()[0])[1:]
# name and functional description
db_entry = line.split("[", 1)
db_entry = db_entry[0].split(" ", 1)
db_entry = db_entry[1][:-1]
# organism name
if line.count("[") != 1:
splitline = line.split("[")
db_org = splitline[line.count("[")].strip()[:-1]
if db_org[0].isdigit():
split_db_org = db_org.split()
try:
if split_db_org[1] == "sp.":
db_org = split_db_org[0] + " " + split_db_org[1] + " " + split_db_org[2]
else:
db_org = split_db_org[1] + " " + split_db_org[2]
except IndexError:
try:
db_org = split_db_org[1]
except IndexError:
db_org = splitline[line.count("[")-1]
if db_org[0].isdigit():
split_db_org = db_org.split()
db_org = split_db_org[1] + " " + split_db_org[2]
else:
db_org = line.split("[", 1)
db_org = db_org[1].split()
try:
db_org = str(db_org[0]) + " " + str(db_org[1])
except IndexError:
db_org = line.strip().split("[", 1)
db_org = db_org[1][:-1]
db_error_counter += 1
db_org = re.sub('[^a-zA-Z0-9-_*. ]', '', db_org)
# add to dictionaries
if "-F" in sys.argv:
db_func_dictionary[db_id] = db_entry
if "-O" in sys.argv:
db_org_dictionary[db_id] = db_org
if "-SO" in sys.argv:
if target_org in db_org:
db_SO_dictionary[db_id] = db_entry
# line counter to show progress
if db_line_counter % 1000000 == 0: # each million
t95 = time.time()
print (str(db_line_counter)[:-6] + "M lines processed so far in " + str(t95-t2) + " seconds.")
t3 = time.time()
print ("\nSuccess!")
print ("Time elapsed: " + str(t3-t2) + " seconds.")
print ("Number of lines: " + str(db_line_counter))
print ("Number of errors: " + str(db_error_counter))
# condensing down the identical matches
condensed_RefSeq_hit_db = {}
for entry in RefSeq_hit_count_db.keys():
try:
if "-O" in sys.argv:
org = db_org_dictionary[entry]
if "-F" in sys.argv:
org = db_func_dictionary[entry]
if org in condensed_RefSeq_hit_db.keys():
condensed_RefSeq_hit_db[org] += RefSeq_hit_count_db[entry]
else:
condensed_RefSeq_hit_db[org] = RefSeq_hit_count_db[entry]
except KeyError:
print ("KeyError:\t" + entry)
continue
if "-SO" in sys.argv:
condensed_RefSeq_SO_hit_db = {}
for entry in RefSeq_hit_count_db.keys():
if entry in db_SO_dictionary.keys():
org = db_SO_dictionary[entry]
if org in condensed_RefSeq_SO_hit_db.keys():
condensed_RefSeq_SO_hit_db[org] += RefSeq_hit_count_db[entry]
else:
condensed_RefSeq_SO_hit_db[org] = RefSeq_hit_count_db[entry]
# dictionary output and summary
print ("\nDictionary database assembled.")
print ("Time elapsed: " + str(t3-t2) + " seconds.")
print ("Number of errors: " + str(db_error_counter))
if "-O" in sys.argv:
print ("\nTop ten organism matches:")
if "-F" in sys.argv:
print ("\nTop ten function matches:")
for k, v in sorted(condensed_RefSeq_hit_db.items(), key=lambda kv: -kv[1])[:10]:
try:
print (str(v) + "\t" + k )
except KeyError:
print (str(v) + "\tWARNING: Key not found for " + k)
continue
# creating the outfiles
if "-O" in sys.argv:
outfile_name = infile_name[:-4] + "_organism.tsv"
if "-F" in sys.argv:
outfile_name = infile_name[:-4] + "_function.tsv"
if "-SO" in sys.argv:
target_org_outfile = open(infile_name[:-4] + "_" + target_org + ".tsv", "w")
outfile = open (outfile_name, "w")
# writing the output
error_counter = 0
for k, v in sorted(condensed_RefSeq_hit_db.items(), key=lambda kv: -kv[1]):
try:
q = v * 100 / float(line_counter)
outfile.write (str(q) + "\t" + str(v) + "\t" + k + "\n")
except KeyError:
outfile.write (str(q) + "\t" + str(v) + "\tWARNING: Key not found for " + k + "\n")
error_counter += 1
continue
# writing the output if optional specific organism flag is active
if "-SO" in sys.argv:
for k, v in sorted(condensed_RefSeq_SO_hit_db.items(), key=lambda kv: -kv[1]):
try:
q = v * 100 / float(line_counter)
target_org_outfile.write (str(q) + "\t" + str(v) + "\t" + k + "\n")
except KeyError:
target_org_outfile.write (str(q) + "\t" + str(v) + "\tWARNING: Key not found for " + k + "\n")
error_counter += 1
continue
print ("\nAnnotations saved to file: '" + outfile_name + "'.")
print ("Number of errors: " + str(error_counter))
db.close()
outfile.close()
| transcript/samsa2 | python_scripts/standardized_DIAMOND_analysis_counter.py | Python | gpl-3.0 | 8,256 | [ "BLAST" ] | b378c33b03c37929f957cd60356d0f0d949c3aca2316cbd32994d2921ab63d7f |
# -*- coding: utf-8 -*-
import os
#from numpy import *
import numpy as np
import numpy.matlib
from numpy.matlib import repmat
from scipy.integrate import trapz
import copy as copyModule
#from libc.stdio import printf
import pyhrf
from pyhrf import xmlio
from pyhrf.tools import resampleToGrid, get_2Dtable_string
from pyhrf.xmlio.xmlnumpy import NumpyXMLHandler
from pyhrf.ndarray import xndarray
from pyhrf.jde.intensivecalc import calcCorrEnergies, sampleSmmNrl, sampleSmmNrl2,computeYtilde
from pyhrf.jde.intensivecalc import sampleSmmNrlWithRelVar, sampleSmmNrl2WithRelVar, computeYtildeWithRelVar
from pyhrf.jde.samplerbase import *
from pyhrf.jde.beta import *
from pyhrf.boldsynth.spatialconfig import hashMask
from base import *
from pyhrf.tools.aexpression import ArithmeticExpression as AExpr
from pyhrf.tools.aexpression import ArithmeticExpressionNameError, \
ArithmeticExpressionSyntaxError
from pyhrf.stats import compute_roc_labels_scikit, threshold_labels, \
mark_wrong_labels, compute_roc_labels, cpt_ppm_a_mcmc
from scipy.integrate import quad
from pyhrf.tools.io import read_volume
#class NrlChecker:
#def __init__(self):
#self.called = False
#def __call__(self):
class NRLSampler(xmlio.XMLParamDrivenClass, GibbsSamplerVariable):
"""
Class handling the Gibbs sampling of Neural Response Levels (NRLs) under a
bi-Gaussian mixture prior. It handles both the independent and the spatial versions.
Refs : Vincent 2010 IEEE TMI, Makni 2008 Neuroimage, Sockel 2009 ICASSP
#TODO : comment attributes
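Sketch of the prior assumed here (notation added for documentation, not from the
original code): for condition m and voxel j a hidden label q_{mj} selects the
mixture component of the response level a_{mj}:
q_{mj} = L_CI (inactive) -> a_{mj} ~ N(0, v_{CI,m})
q_{mj} = L_CA (active) -> a_{mj} ~ N(mu_{CA,m}, v_{CA,m})
The spatial variant couples the labels across voxels through the 'beta' variable
of the sampler engine (Markov random field prior on the label maps).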
"""
# parameters specifications :
P_SAMPLE_LABELS = 'sampleLabels'
P_LABELS_INI = 'labelsIni'
P_LABELS_COLORS = 'labelsColors'
P_SAMPLE_FLAG = 'sampleFlag'
P_VAL_INI = 'initialValue'
P_CONTRASTS = 'contrasts'
P_USE_TRUE_NRLS = 'useTrueNrls'
P_USE_TRUE_LABELS = 'useTrueLabels'
P_TrueNrlFilename = 'TrueNrlFilename'
P_TrueLabelsFilename ='TrueLabelsFilename'
P_OUTPUT_CONTRAST = 'writeContrastsOutput'
P_OUTPUT_CONTRAST_VAR = 'writeContrastVariancesOutput'
P_OUTPUT_NRL = 'writeResponsesOutput'
P_OUTPUT_LABELS = 'writeLabelsOutput'
P_WIP_VARIANCE = 'wipVariance'
# parameters definitions and default values :
defaultParameters = {
P_SAMPLE_FLAG : True,
P_VAL_INI : None,
P_USE_TRUE_NRLS : False, #False,
P_USE_TRUE_LABELS : False, #False,
P_SAMPLE_LABELS : True,
P_LABELS_INI : None,
P_LABELS_COLORS : np.array([0.0,0.0], dtype=float),
P_CONTRASTS : {
'dummy_contrast_example' : '0.5 * audio - 0.5 * video'
},
P_OUTPUT_NRL : True,
P_OUTPUT_CONTRAST_VAR : True,
P_OUTPUT_CONTRAST : True,
P_WIP_VARIANCE : False,
'PPM_proba_threshold' : .05,
'PPM_value_threshold' : 0,
'PPM_value_Multi_threshold' : np.arange(0.,4.1,0.1),
'mean_activation_threshold' : 4.,
'rescale_results' : True,
P_TrueNrlFilename : './nrls.nii',
P_TrueLabelsFilename : './labels.nii',
}
if pyhrf.__usemode__ == pyhrf.DEVEL:
defaultParameters[P_OUTPUT_LABELS] = True
parametersToShow = [P_SAMPLE_FLAG, P_VAL_INI, P_USE_TRUE_NRLS,
P_TrueNrlFilename,
P_SAMPLE_LABELS,
P_LABELS_INI, P_USE_TRUE_LABELS,
P_TrueLabelsFilename,
P_LABELS_COLORS, P_CONTRASTS, P_OUTPUT_CONTRAST,
P_OUTPUT_CONTRAST_VAR, P_OUTPUT_NRL,
P_WIP_VARIANCE, 'PPM_proba_threshold',
'PPM_value_threshold','PPM_value_Multi_threshold',
'mean_activation_threshold',
'rescale_results']
elif pyhrf.__usemode__ == pyhrf.ENDUSER:
defaultParameters[P_OUTPUT_LABELS] = False
parametersToShow = [P_CONTRASTS]
parametersComments = {
# P_CONTRASTS : 'Define contrasts as a string with the following format:'\
# '\n condition1-condition2;condition1-condition3\n' \
# 'Must be consistent with condition names specified in session data' \
# 'above',
P_CONTRASTS : 'Define contrasts as arithmetic expressions.\n'\
'Condition names used in expressions must be consistent with ' \
'those specified in session data above',
P_TrueNrlFilename :'Define the filename of simulated NRLs.\n'\
'It is taken into account when NRLs is not sampled.',
P_TrueLabelsFilename :'Define the filename of simulated Labels.\n'\
'It is taken into account when Labels are not sampled.',
}
# other class attributes
L_CI = 0
L_CA = 1
CLASSES = np.array([L_CI, L_CA],dtype=int)
CLASS_NAMES = ['inactiv', 'activ']
FALSE_POS = 2
FALSE_NEG = 3
def __init__(self, parameters=None, xmlHandler=NumpyXMLHandler(),
xmlLabel=None, xmlComment=None):
#TODO : comment
xmlio.XMLParamDrivenClass.__init__(self, parameters, xmlHandler,
xmlLabel, xmlComment)
self.sampleLabelsFlag = self.parameters[self.P_SAMPLE_LABELS]
sampleFlag = self.parameters[self.P_SAMPLE_FLAG] or self.sampleLabelsFlag
valIni = self.parameters[self.P_VAL_INI]
useTrueVal = self.parameters[self.P_USE_TRUE_NRLS]
self.TrueNrlsFilename = self.parameters[self.P_TrueNrlFilename]
self.useTrueLabels = self.parameters[self.P_USE_TRUE_LABELS]
self.TrueLabelsFilename = self.parameters[self.P_TrueLabelsFilename]
self.trueLabels = None
an = ['condition', 'voxel']
GibbsSamplerVariable.__init__(self,'nrl', valIni=valIni,
sampleFlag=sampleFlag,
useTrueValue=useTrueVal,
axes_names=an,
value_label='PM NRL')
# instance variables affectation from parameters :
self.labels = self.parameters[self.P_LABELS_INI]
self.contrasts_expr = self.parameters[self.P_CONTRASTS]
self.contrasts_expr.pop('dummy_contrast_example', None)
self.computeContrastsFlag = ( len(self.contrasts_expr) > 0 )
self.activ_thresh = self.parameters['mean_activation_threshold']
#print 'computeContrastsFlag :', self.computeContrastsFlag
#self.parseContrasts(contrasts)
self.nbClasses = len(self.CLASSES)
pyhrf.verbose(6, 'NRLSampler - classes: %s (%d)' \
%(str(self.CLASS_NAMES), self.nbClasses))
self.outputNrls = self.parameters[self.P_OUTPUT_NRL]
self.outputConVars = self.parameters[self.P_OUTPUT_CONTRAST_VAR]
self.outputCons = self.parameters[self.P_OUTPUT_CONTRAST]
self.outputLabels = self.parameters[self.P_OUTPUT_LABELS]
self.labelsMeanHistory = None
self.labelsSmplHistory = None
self.wip_variance_computation = self.parameters[self.P_WIP_VARIANCE]
self.ppm_proba_thresh = self.parameters['PPM_proba_threshold']
self.ppm_value_thresh = self.parameters['PPM_value_threshold']
self.ppm_value_multi_thresh = self.parameters['PPM_value_Multi_threshold']
self.rescale_results = self.parameters['rescale_results']
def linkToData(self, dataInput):
self.dataInput = dataInput
self.nbConditions = self.dataInput.nbConditions
self.nbVox = self.dataInput.nbVoxels
self.ny = self.dataInput.ny
self.nbSessions = self.dataInput.nbSessions
self.cardClass = np.zeros((self.nbClasses, self.nbConditions), dtype=int)
self.voxIdx = [range(self.nbConditions) for c in xrange(self.nbClasses)]
#print dataInput.simulData
#TODO handle condition matching
if dataInput.simulData is not None:
if isinstance(dataInput.simulData, dict):
if dataInput.simulData.has_key('nrls'):
nrls = dataInput.simulData['nrls']
if isinstance(nrls, xndarray):
self.trueValue = nrls.reorient(['condition','voxel']).data
else:
self.trueValue = nrls
if dataInput.simulData.has_key('labels'):
labels = dataInput.simulData['labels']
if isinstance(labels, xndarray):
self.trueLabels = labels.reorient(['condition','voxel']).data
else:
self.trueLabels = labels
elif isinstance(dataInput.simulData, list):
sd = dataInput.simulData[0]
if isinstance(sd, dict):
self.trueValue = sd['nrls'].astype(np.float64)
self.trueLabels = sd['labels'].astype(np.int32)
else:
self.trueValue = sd.nrls.data.astype(np.float64)
self.trueLabels = sd.nrls.labels
else:
self.trueValue = dataInput.simulData.nrls.data.astype(np.float64)
self.trueLabels = dataInput.simulData.nrls.labels
self.trueLabels = self.trueLabels[:self.nbConditions,:].astype(np.int32)
self.trueValue = self.trueValue[:self.nbConditions,:].astype(np.float64)
else:
self.trueLabels = None
#print self.trueLables
def init_contrasts(self):
pyhrf.verbose(3, 'Init of contrasts ...')
pyhrf.verbose(3, 'self.dataInput.cNames: %s'
%str(self.dataInput.cNames))
cnames = self.dataInput.cNames
#print 'cnames', cnames
self.nrls_conds = dict([(str(cond), self.currentValue[icond,:]) \
for icond,cond in enumerate(cnames)] )
if isinstance(self.contrasts_expr, str):
cexpr = dict([('contrast%d'%i,s) \
for i,s in enumerate(self.contrasts_expr.split(";")) \
if len(s) > 0])
else:
cexpr = self.contrasts_expr
cexpr.pop('dummy_contrast_example', None)
#print 'testeeeuu', cexpr.items
self.conds_in_contrasts = dict([(str(cn), \
filter(lambda x: x in cv, cnames)) \
for cn,cv in cexpr.items()] )
#print 'balh:', self.conds_in_contrasts
#print 'self.contrasts_expr', self.contrasts_expr
self.contrasts_calc = dict([ (str(cn),AExpr(str(e), **self.nrls_conds)) \
for cn,e in cexpr.iteritems() ])
#print 'self.contrasts_calc', self.contrasts_calc.values()
#for cn,e in cexpr.iteritems():
#print 'AExpr(str(e), **nrls_conds):', (AExpr(str(e), **nrls_conds))
for cn,cc in self.contrasts_calc.iteritems():
try:
cc.check()
except ArithmeticExpressionNameError, err:
msg = 'Error in definition of contrast "%s":' %cn
pyhrf.verbose(1,msg)
pyhrf.verbose(1,'Unknown conditions: ' + ', '.join(err.args[2]))
pyhrf.verbose(1,'Expression was: "%s"' %err.args[1])
raise err
except ArithmeticExpressionSyntaxError, err:
msg = 'Syntax error in definition of contrast %s:' %cn
pyhrf.verbose(1,msg)
pyhrf.verbose(1, 'expression was: ' + err.args[1])
raise err
self.cumulContrast = dict([ (cn,np.zeros(self.nbVox)) \
for cn,e in cexpr.iteritems() ])
#print 'cexpr.iteritems: ', cexpr.iteritems()
self.cumul2Contrast = dict([ (cn,np.zeros(self.nbVox)) \
for cn,e in cexpr.iteritems() ])
def checkAndSetInitValue(self, variables):
self.checkAndSetInitLabels(variables)
self.checkAndSetInitNRL(variables)
def checkAndSetInitLabels(self, variables):
pyhrf.verbose(1, 'NRLSampler.checkAndSetInitLabels ...')
# Generate default labels if necessary :
#print 'blab', self.useTrueLabels
if 0 and self.useTrueLabels:
if self.trueLabels is not None:
pyhrf.verbose(3, 'Use true label values ...')
#TODO : take only common conditions
self.labels = self.trueLabels.copy()
#print 'True labels : ', self.labels.shape
#HACK
#tmpl = np.zeros_like(self.labels)
#tmpl[:,:self.nbVox/30] = 1
#tmpl = np.array([np.random.permutation(t) for t in tmpl])
#self.labels = np.bitwise_or(self.labels, tmpl)
# print '~~~~~~~'
# print 'tmpl', tmpl
# print np.unique(self.labels)
# print 'labels:',self.labels
else:
raise Exception('True labels have to be used but none defined.')
if 1 and self.useTrueLabels:
if self.TrueLabelsFilename is not None:
pyhrf.verbose(3, 'Use true label values ...')
self.labels = np.zeros((self.nbConditions, self.nbVox),
dtype=np.int32)
TrueLabels = read_volume(self.TrueLabelsFilename)
for cond in np.arange(TrueLabels[0].shape[3]):
count=0
for i in np.arange(TrueLabels[0].shape[0]):
for j in np.arange(TrueLabels[0].shape[1]):
for k in np.arange(TrueLabels[0].shape[2]):
self.labels[cond,count] = TrueLabels[0][i,j,k,cond]
count += 1
self.trueLabels = self.labels.copy()
else:
raise Exception('True labels have to be used but none defined.')
if self.labels is None : # if no initial labels specified
pyhrf.verbose(1, 'Labels are not initialized -> random init')
if 0:
self.labels = np.zeros((self.nbConditions, self.nbVox),
dtype=np.int32)
nbVoxInClass = np.zeros(self.nbClasses, dtype=int) \
+ self.nbVox/self.nbClasses
nbVoxInClass[0] = self.nbVox-nbVoxInClass[1:].sum()
#nbVoxInClass0 = self.nbVox/2
# Uniform dispatching :
for j in xrange(self.nbConditions) :
l = []
for c in xrange(self.nbClasses) :
l += [self.CLASSES[c]] * nbVoxInClass[c]
self.labels[j,:] = np.random.permutation(l)
else:
# sometimes it's better to put all voxels in the activating class
# -> mixture components are less likely to degenerate
#self.labels = np.ones((self.nbConditions, self.nbVox), dtype=np.int32)
sh = (self.nbConditions, self.nbVox)
self.labels = np.random.randint(0, 2, np.prod(sh)).reshape(sh).astype(np.int32)
#self.labels = np.zeros((self.nbConditions, self.nbVox), dtype=np.int32)
#print 'here'
#nlabs = self.nbConditions * self.nbVox
if 0:
self.labels = np.random.binomial(1,0.9,nlabs).reshape(self.nbConditions, self.nbVox).astype(np.int32)
#print 'self.labels to see', self.labels, self.labels.shape
pyhrf.verbose(5, 'init labels :')
pyhrf.verbose.printNdarray(6, self.labels)
self.countLabels(self.labels, self.voxIdx, self.cardClass)
def checkAndSetInitNRL(self, variables):
pyhrf.verbose(3, 'NRLSampler.checkAndSetInitNRLs ...')
if self.currentValue is None :
if 0 and self.useTrueValue:
if self.trueValue is None:
raise Exception('Needed a true value for nrls init but '\
'None defined')
else:
self.currentValue = self.trueValue.astype(np.float64)
elif 1 and self.useTrueValue:
if self.TrueNrlsFilename is not None:
pyhrf.verbose(3, 'Use true NRL values ...')
self.currentValue = np.zeros((self.nbConditions, self.nbVox),
dtype=np.float64)
TrueNrls = read_volume(self.TrueNrlsFilename)
for cond in np.arange(TrueNrls[0].shape[3]):
count=0
for i in np.arange(TrueNrls[0].shape[0]):
for j in np.arange(TrueNrls[0].shape[1]):
for k in np.arange(TrueNrls[0].shape[2]):
self.currentValue[cond,count] = TrueNrls[0][i,j,k,cond]
count += 1
self.trueValue = self.currentValue.copy()
else:
raise Exception('Needed a true value for nrls init but '\
'None defined')
else:
#nrlsIni = np.zeros((self.nbConditions, self.nbVox), dtype=np.float64)
## Init Nrls according to classes definitions :
#smplMixtP = variables[self.samplerEngine.I_MIXT_PARAM]
## ensure that mixture parameters are correctly set
#smplMixtP.checkAndSetInitValue(variables)
#var = smplMixtP.getCurrentVars()
#means = smplMixtP.getCurrentMeans()
#for j in xrange(self.nbConditions):
#for c in xrange(self.nbClasses):
#iv = self.voxIdx[c][j]
#nrlsIni[j,iv] = np.random.randn(self.cardClass[c,j]) \
#* var[c,j]**0.5 + means[c,j]
#self.currentValue = nrlsIni
##HACK (?)
#self.currentValue = np.zeros((self.nbConditions, self.nbVox),
#dtype=np.float64) + 20
#self.currentValue = (np.random.rand(self.nbConditions, self.nbVox).astype(np.float64) - .5 ) * 10
#self.currentValue = (np.random.rand(self.nbConditions, self.nbVox).astype(np.float64) - .5 ) * 10
#print 'nrlsIni : ', self.currentValue
#Initialise nrls using initial labels
if 1:
self.currentValue = np.zeros((self.nbConditions, self.nbVox),dtype=np.float64)
#Mixt_par = self.samplerEngine.getVariable('mixt_params')
#Mean_CA = Mixt_par.currentValue[Mixt_par.I_MEAN_CA,:]
#Var_CA = Mixt_par.currentValue[Mixt_par.I_VAR_CA,:]
#Var_CI = Mixt_par.currentValue[Mixt_par.I_VAR_CI,:]
Mean_CA = 30. * np.ones(self.nbConditions)
Var_CA = 1. * np.ones(self.nbConditions)
Var_CI = 1. * np.ones(self.nbConditions)
#Mean_CA = 2. * np.ones(self.nbConditions)
#Var_CA = 0.5 * np.ones(self.nbConditions)
#Var_CI = 0.5 * np.ones(self.nbConditions)
for m in xrange(self.nbConditions):
Ac_pos = np.where(self.labels[m])
Nrls = np.random.randn((self.nbVox))*Var_CI[m]**0.5 + 0
Nrls[Ac_pos[0]] = np.random.randn((Ac_pos[0]).size)*Var_CA[m]**0.5 + Mean_CA[m]
self.currentValue[m] = Nrls.astype(np.float64)
def countLabels(self, labels, voxIdx, cardClass):
pyhrf.verbose(3, 'NRLSampler.countLabels ...')
# print 'countLabels .......'
# print labels.shape
# print len(voxIdx), len(voxIdx[0])
# print len(cardClass)
for j in xrange(self.nbConditions):
for c in xrange(self.nbClasses):
if len(labels.shape) == 2:
labs_c = labels
else:
labs_c = labs_c
try:
voxIdx[c][j] = np.where(labels[j,:]==self.CLASSES[c])[0]
except Exception, e:
print e
print '~~~~~~~~~~~~~~~~~~'
print j,c
print labels.shape
print labels[j,:].shape
print len(voxIdx), len(voxIdx[0])
print self.CLASSES[c]
print np.where(labels[j,:]==self.CLASSES[c])[0]
raise e
cardClass[c,j] = len(voxIdx[c][j])
pyhrf.verbose(5, 'Nb vox in C%d for cond %d : %d' \
%(c,j,cardClass[c,j]))
#assert self.cardClass[:,j].sum() == self.nbVox
def initObservables(self):
pyhrf.verbose(3, 'NRLSampler.initObservables ...')
GibbsSamplerVariable.initObservables(self)
self.meanLabels = None
shape_utile = np.zeros((3))
self.cumulLabels = np.zeros((self.nbClasses,)+np.shape(self.currentValue),
dtype=np.float32) #insert the final nb of iterations
self.count_above_thresh = np.zeros_like(self.currentValue).astype(int)
self.count_above_Multi_thresh = np.zeros((len(self.ppm_value_multi_thresh),self.currentValue.shape[0],self.currentValue.shape[1])).astype(int)
#print 'self.count_above_Multi_thresh shape =',self.count_above_Multi_thresh.shape
#print 'self.currentValue', self.currentValue, self.currentValue.shape
#self.cumulLabels_all_iterations = np.zeros((self.nbClasses,)+np.shape(self.currentValue,)+shape_utile.shape,
#dtype=float)
#print 'Concerning shape of labels', self.cumulLabels_all_iterations.shape
#print 'Concerning labels', self.cumulLabels.shape, self.nbClasses, self.currentValue.shape, self.nbConditions, self.nbVox
#print self.nbItObservables
#print 'Pour voir sur iterations', self.nbIterations, 'test'
if self.computeContrastsFlag:
self.init_contrasts()
if 0 and self.computeContrastsFlag:
#for c1 in self.cumulContrast.iterkeys():
#for c2 in self.cumulContrast[c1].iterkeys():
#self.cumulContrast[c1][c2] = np.zeros(self.nbVox, dtype=float) #sum of Linear Combination of nrls
#self.cumul2Contrast[c1][c2] = np.zeros(self.nbVox, dtype=float) #sum square of Linear Combination of nrls
self.cumulContrast_Lc_Rc = np.zeros(self.nbVox, dtype=float) #Lc-Rc
self.cumul2Contrast_Lc_Rc = np.zeros(self.nbVox, dtype=float)
self.cumulContrast_V_A = np.zeros(self.nbVox, dtype=float) #V-A
self.cumul2Contrast_V_A = np.zeros(self.nbVox, dtype=float)
self.cumulContrast_C_S = np.zeros(self.nbVox, dtype=float) #C-S
self.cumul2Contrast_C_S = np.zeros(self.nbVox, dtype=float)
self.cumulContrast_C_S_A = np.zeros(self.nbVox, dtype=float) #C-S_A
self.cumul2Contrast_C_S_A = np.zeros(self.nbVox, dtype=float)
################# CHANGE HERE DEPENDING ON THE NUMBER OF ITERATIONS !!
if self.wip_variance_computation:
self.saveNRL = np.zeros((2000,self.nbConditions, self.nbVox),
dtype=float)
sh = (self.nbClasses,) + np.shape(self.currentValue)
self.diff_nrl_mean_masked = np.zeros(sh,dtype=float)
self.diff_nrl_mean_non_masked = np.zeros(np.shape(self.currentValue),
dtype=float)
self.Covar_masked = np.zeros((4,self.nbVox), dtype=float)
self.Covar_non_masked = np.zeros((self.nbVox), dtype=float)
self.varCon_2cond_corr_masked = np.zeros((4,self.nbVox),
dtype=float)
self.varCon_2cond_corr_apost = np.zeros((4,self.nbVox),
dtype=float)
#return self.cumulLabels_all_iterations
self.cumul_mean_apost = np.zeros_like(self.meanClassApost).astype(np.float32)
self.cumul_var_apost = np.zeros_like(self.varClassApost).astype(np.float32)
#self.sum_nrls_carr_tot_cond = np.zeros((self.nbClasses,)+np.shape(self.currentValue),dtype=float)
self.sum_nrls_carr_both_classes_cond = np.zeros((self.nbClasses,)+np.shape(self.currentValue),dtype=float)
#self.sum_nrls_carr_class_inactiv_cond = np.zeros((self.nbClasses,)+np.shape(self.currentValue),dtype=float)
#self.sum_nrls_tot_cond = np.zeros(np.shape(self.currentValue),dtype=float)
self.sum_nrls_both_classes_cond = np.zeros((self.nbClasses,)+np.shape(self.currentValue),dtype=float)
#self.sum_nrls_class_inactiv_cond = np.zeros(np.shape(self.currentValue),dtype=float)
self.finalVariances = np.zeros(np.shape(self.currentValue),
dtype=np.float32)
self.final_mean_var_a_post = np.zeros(np.shape(self.currentValue),
dtype=np.float32)
self.varCon_2cond_indep_masked = np.zeros((4,self.nbVox),
dtype=np.float32)
self.varCon_2cond_indep_apost = np.zeros((4,self.nbVox),
dtype=np.float32)
def updateObsersables(self):
pyhrf.verbose(4, 'NRLSampler.updateObsersables ...')
GibbsSamplerVariable.updateObsersables(self)
sHrf = self.samplerEngine.getVariable('hrf')
sScale = self.samplerEngine.getVariable('scale')
if sHrf.sampleFlag and np.allclose(sHrf.normalise,0.) and \
not sScale.sampleFlag and self.sampleFlag:
pyhrf.verbose(6, 'Normalizing Posterior mean of NRLs at each iteration ...')
#print '%%%% scaling NRL PME %%% - hnorm = ', sHrf.norm
# Undo previous mean calculation:
self.cumul -= self.currentValue
self.cumul3 -= (self.currentValue - self.mean)**2
#self.cumul2 -= self.currentValue**2
# Use scaled quantities instead:
self.cumul += self.currentValue * sHrf.norm
#self.cumul2 += (self.currentValue * sHrf.norm)**2
self.mean = self.cumul / self.nbItObservables
self.cumul3 += (self.currentValue * sHrf.norm - self.mean)**2
self.error = self.cumul3 / self.nbItObservables
#self.error = self.cumul2 / self.nbItObservables - \
#self.mean**2
for c in xrange(self.nbClasses):
self.cumulLabels[c,:,:] += (self.labels==c)
#self.cumulLabels_all_iterations[c,:,:,
#print 'labels at iteration'
#print self.cumulLabels[c]
#print self.labels
self.meanLabels = self.cumulLabels / self.nbItObservables
self.count_above_thresh += self.currentValue > self.ppm_value_thresh
self.freq_above_thresh = self.count_above_thresh / float(self.nbItObservables)
#print 'self.currentValue.shape =',self.currentValue.shape
#print 'self.currentValue[2,:].sum() =',self.currentValue[2,:].sum()
for i in xrange(len(self.ppm_value_multi_thresh)):
self.count_above_Multi_thresh[i] += abs(self.currentValue) >= self.ppm_value_multi_thresh[i]
self.freq_above_Multi_thresh = self.count_above_Multi_thresh / float(self.nbItObservables)
#print 'Mean labels at each iteration', self.meanLabels, self.meanLabels.shape
#print 'Cumul labels at each iteration', self.cumulLabels[c], self.cumulLabels.shape, self.cumulLabels[c].shape
#print 'Concerning classes', self.nbClasses
#print 'cumulLabels_all_iterations', self.cumulLabels_all_iterations.shape, self.cumulLabels_all_iterations[:,:,:,1].shape
#print 'Current value:', self.currentValue, self.currentValue.shape
#print 'labels at iteration:'
#print self.labels, self.labels.shape
if self.wip_variance_computation:
#To save value at each iteration
for cond in xrange(self.nbConditions):
#self.sum_nrls_tot_cond[cond] += (self.currentValue[cond,:])
#self.sum_nrls_carr_tot_cond[cond] += (self.currentValue[cond,:])**2
for c in xrange(self.nbClasses):
#self.sum_nrls_class_activ_cond[cond] += (self.currentValue[cond,:])*self.labels[cond,:]
#self.sum_nrls_class_inactiv_cond[cond] += self.sum_nrls_tot_cond[cond] - self.sum_nrls_class_activ_cond[cond]
#self.sum_nrls_carr_tot_cond[cond] += (self.currentValue[cond,:])**2
#self.sum_nrls_carr_class_activ_cond[cond] += ((self.currentValue[cond,:])**2)*self.labels[cond,:]
#self.sum_nrls_carr_class_inactiv_cond[cond] += self.sum_nrls_carr_tot_cond[cond] - self.sum_nrls_carr_class_activ_cond[cond]
#print 'TO TEST: ', self.sum_nrls_both_classes_cond[c,cond,:], self.currentValue[cond,:], self.labels[cond,:]
if c==1:
self.sum_nrls_both_classes_cond[c,cond,:] += \
(self.currentValue[cond,:])* self.labels[cond,:]
self.sum_nrls_carr_both_classes_cond[c,cond] += \
((self.currentValue[cond,:])**2)*self.labels[cond,:]
elif c==0:
self.sum_nrls_both_classes_cond[c,cond,:] += \
(self.currentValue[cond,:])*(1-self.labels[cond,:])
self.sum_nrls_carr_both_classes_cond[c,cond] += \
((self.currentValue[cond,:])**2) * \
(1-self.labels[cond,:])
#print 'TO TEST after attribution: ', self.sum_nrls_both_classes_cond[c,cond,:], self.currentValue[cond,:], self.labels[cond,:]
if 0:
print 'Verification about self.sum_nrls_both_classes_cond on voxels 0 to 3:',
print self.sum_nrls_both_classes_cond[:,:,310:316]
#print 'prod tot carr:'
#print self.sum_nrls_carr_tot_cond, self.sum_nrls_carr_tot_cond.shape
#print 'prod activ carr:'
#print self.sum_nrls_carr_class_activ_cond, self.sum_nrls_carr_class_activ_cond.shape
#print 'prod inactiv carr:'
#print self.sum_nrls_carr_class_inactiv_cond, self.sum_nrls_carr_class_inactiv_cond.shape
#print 'prod tot:'
#print self.sum_nrls_tot_cond, self.sum_nrls_tot_cond.shape
#print 'prod activ:'
#print self.sum_nrls_class_activ_cond, self.sum_nrls_class_activ_cond.shape
#print 'prod inactiv:'
#print self.sum_nrls_class_inactiv_cond, self.sum_nrls_class_inactiv_cond.shape
if pyhrf.verbose.verbosity > 4:
print 'Non zeros positions for cumulLabels:'
print np.where(self.cumulLabels[1,1,:]>0)
print 'Non zeros for self.sum_nrls_carr_class_activ_cond and self.sum_nrls_class_activ_cond :'
#print np.where(self.sum_nrls_carr_class_activ_cond>0)
#print np.where(self.sum_nrls_class_activ_cond>0)
print 'Non zeros for labels :',
print np.where(self.labels[1,:]>0)
#print 'Test:', self.sum_nrls_carr_class_activ_cond.shape, self.sum_nrls_carr_class_activ_cond[298], self.sum_nrls_class_activ_cond[298]
pyhrf.verbose(4,'nb of iterations: %d' %self.nbItObservables)
pyhrf.verbose(4,'computeContrastsFlag: %s'
%str(self.computeContrastsFlag))
#print 'self.computeContrastsFlag:', self.computeContrastsFlag
#To save value at each iteration
if self.wip_variance_computation:
self.saveNRL[self.nbItObservables-1,:,:] = self.currentValue
#print 'self.saveNRL.shape', self.saveNRL.shape
#print 'self.saveNRL', self.saveNRL[self.nbItObservables-1,0,:], self.saveNRL[self.nbItObservables-1,0,:].shape
#print 'current value to compare:', self.currentValue[0,:]
if self.computeContrastsFlag:
cv = self.currentValue
if 0:
print "Current value used for con:", cv.shape
print "nrl values:", cv[0,310:316], cv[1,310:316],
print "con values?:", 2*cv[1,310:316]- cv[0,310:316]
#print 'blob', self.cumulContrast
#for cname, cumul in self.cumulContrast.iteritems():
#print 'self.cumulContrast[cname] debut:', cname, self.cumulContrast[cname]
#print 'self.cumul2Contrast[cname] debut:', cname, self.cumul2Contrast[cname]
##print 'cname:', cname
#contrast = self.contrasts_calc[cname].evaluate()
#print 'self.contrasts_calc[cname].evaluate(): ', self.contrasts_calc[cname].evaluate()
#self.cumulContrast[cname] += contrast
#self.cumul2Contrast[cname] += contrast**2
#print 'self.cumulContrast.iteritems() :', self.cumulContrast.values()
for cname, cumul in self.cumulContrast.iteritems():
if 0:
print 'cname:', cname
print 'nbIt', self.nbItObservables
#print 'self.cumulContrast[cname]:', cname, self.cumulContrast[cname]
contrast = self.contrasts_calc[cname].evaluate()
#print cname, 'self.contrasts_calc[cname].evaluate(): ', self.contrasts_calc[cname].evaluate()
self.cumulContrast[cname] += contrast
#print cname, 'cumul:', cumul
for cname, cumul2 in self.cumul2Contrast.iteritems():
if 0:
print 'cname:', cname
print 'nbIt', self.nbItObservables
#print 'self.cumul2Contrast[cname]:', cname, self.cumul2Contrast[cname]
contrast2 = (self.contrasts_calc[cname].evaluate())**2
#print cname, 'self.contrasts_calc[cname].evaluate() carre: ', (self.contrasts_calc[cname].evaluate())**2
self.cumul2Contrast[cname] += contrast2
#print cname, 'cumul2:', cumul2
#Contrast = sum of two conditions --> variances estimate study
#contrast_V-A = cv
#B/II 1/
if self.wip_variance_computation:
for cond in xrange(self.nbConditions):
# saveNRL was filled at index nbItObservables-1 above, so read the same slot here
self.diff_nrl_mean_non_masked[cond,:] += self.saveNRL[self.nbItObservables-1, cond,:] - self.saveNRL[self.nbItObservables-1, cond,:].mean()
self.NRL_activ_masked = (self.saveNRL[self.nbItObservables-1, cond,:])*self.labels[cond,:]
self.NRL_inactiv_masked = (self.saveNRL[self.nbItObservables-1, cond,:])*(1-self.labels[cond,:])
for c in xrange(self.nbClasses):
if c==1:
self.diff_nrl_mean_masked[c,cond,:] += ( self.NRL_activ_masked - self.NRL_activ_masked.mean() )
elif c==0:
self.diff_nrl_mean_masked[c,cond,:] += ( self.NRL_inactiv_masked - self.NRL_inactiv_masked.mean() )
# ic1 = self.dataInput.cNames.index(c1)
# for c2 in self.cumulContrast[c1].iterkeys():
# ic2 = self.dataInput.cNames.index(c2)
# diff = self.currentValue[ic1,:]-self.currentValue[ic2,:]
# self.cumulContrast[c1][c2] += diff
# self.cumul2Contrast[c1][c2] += diff**2
#To get info on the dataInput object: print its fields (line1) or print its class (line2)
#print dir(self.dataInput) #.cNames
#print self.dataInput.__class__
#print self.dataInput.cNames
#print cumulContrast_Lc_Rc
if 0 and ('calculaudio' in self.dataInput.cNames):
ic1 = self.dataInput.cNames.index('calculaudio')
ic2 = self.dataInput.cNames.index('calculvideo')
ic3 = self.dataInput.cNames.index('clicDaudio')
ic4 = self.dataInput.cNames.index('clicDvideo')
ic5 = self.dataInput.cNames.index('clicGaudio')
ic6 = self.dataInput.cNames.index('clicGvideo')
ic7 = self.dataInput.cNames.index('damier_H')
ic8 = self.dataInput.cNames.index('damier_V')
ic9 = self.dataInput.cNames.index('phraseaudio')
ic10 = self.dataInput.cNames.index('phrasevideo')
cv = self.currentValue
#print 'CURRENT VALUE:', self.currentValue, self.currentValue.shape
contrast_Lc_Rc = cv[ic5,:] + cv[ic6,:] - \
cv[ic3,:] - cv[ic4,:] #Lc-Rc
self.cumulContrast_Lc_Rc += contrast_Lc_Rc
self.cumul2Contrast_Lc_Rc += contrast_Lc_Rc**2
contrast_V_A = cv[ic2,:] + cv[ic4,:] + cv[ic6,:] +\
cv[ic10,:] - cv[ic1,:] - cv[ic3,:] - cv[ic5,:] - cv[ic9,:] #V-A
self.cumulContrast_V_A += contrast_V_A
self.cumul2Contrast_V_A += contrast_V_A**2
contrast_C_S = cv[ic1,:] + cv[ic2,:] - cv[ic9,:] - cv[ic10,:] #C-S
self.cumulContrast_C_S += contrast_C_S
self.cumul2Contrast_C_S += contrast_C_S**2
contrast_C_S_A = cv[ic1,:] - cv[ic9,:] #C-S_A
self.cumulContrast_C_S_A += contrast_C_S_A
self.cumul2Contrast_C_S_A += contrast_C_S_A**2
# mean of posterior components:
self.cumul_mean_apost += self.meanClassApost
self.cumul_var_apost += self.varClassApost
self.mean_mean_apost = self.cumul_mean_apost / self.nbItObservables
self.mean_var_apost = self.cumul_var_apost / self.nbItObservables
if 0:
print 'nb of iterations', self.nbItObservables
print 'tests for voxels 0 to 3 - condition 1:'
print 'labels for the current iteration:',
print self.labels[0,310:316]
print 'nrls for the current iteration:',
print self.currentValue[0,310:316]
print 'Sum nrls for class inactiv: self.sum_nrls_both_classes_cond[c=0,cond=0,310:316]:'
print self.sum_nrls_both_classes_cond[0,0,310:316]
print 'Sum nrls for class activ: self.sum_nrls_both_classes_cond[c=1,cond=0,310:316]:'
print self.sum_nrls_both_classes_cond[1,0,310:316]
print 'Sum carr nrls for class inactiv: self.sum_nrls_carr_both_classes_cond[c=0,cond=0,310:316]:'
print self.sum_nrls_carr_both_classes_cond[0,0,310:316]
print 'Sum carr nrls for class activ: self.sum_nrls_carr_both_classes_cond[c=1,cond=0,310:316]:'
print self.sum_nrls_carr_both_classes_cond[1,0,310:316]
print '--'
print 'self.mean_var_apost[0,0,310:316] - 1st condition, inactive class:',
print self.mean_var_apost[0,0,310:316]
print 'self.mean_var_apost[1,0,310:316] - 1st condition, active class:',
print self.mean_var_apost[1,0,310:316]
print '#########################################'
print 'tests for voxels 0 to 3 - condition 2:'
print 'labels for the current iteration:',
print self.labels[1,310:316]
print 'nrls for the current iteration:',
print self.currentValue[1,310:316]
print 'Sum nrls for class inactiv: self.sum_nrls_both_classes_cond[c=0,cond=1,310:316]:'
print self.sum_nrls_both_classes_cond[0,1,310:316]
print 'Sum nrls for class activ: self.sum_nrls_both_classes_cond[c=1,cond=1,310:316]:'
print self.sum_nrls_both_classes_cond[1,1,310:316]
print 'Sum carr nrls for class inactiv: self.sum_nrls_carr_both_classes_cond[c=0,cond=1,310:316]:'
print self.sum_nrls_carr_both_classes_cond[0,1,310:316]
print 'Sum carr nrls for class activ: self.sum_nrls_carr_both_classes_cond[c=1,cond=1,310:316]:'
print self.sum_nrls_carr_both_classes_cond[1,1,310:316]
print '--'
print 'self.mean_var_apost[0,1,310:316] - 2nd condition, inactive class:',
print self.mean_var_apost[0,1,310:316]
print 'self.mean_var_apost[1,1,310:316] - 2nd condition, active class:',
print self.mean_var_apost[1,1,310:316]
def saveObservables(self, it):
GibbsSamplerVariable.saveObservables(self, it)
if self.labelsMeanHistory is not None :
self.labelsMeanHistory = np.concatenate((self.labelsMeanHistory,
[self.meanLabels]))
else :
self.labelsMeanHistory = np.array([self.meanLabels.copy()])
#print 'save trucs'
def saveCurrentValue(self, it):
#print 'self.labels', self.labels
GibbsSamplerVariable.saveCurrentValue(self, it)
if self.labelsSmplHistory is not None :
self.labelsSmplHistory = np.concatenate((self.labelsSmplHistory,
[self.labels]))
else :
self.labelsSmplHistory = np.array([self.labels.copy()])
def cleanObservables(self):
GibbsSamplerVariable.cleanObservables(self)
if 0: #hack to save cumulLabels if necessary
del self.cumulLabels
self.cleanMemory()
def PPMcalculus(threshold_value, apost_mean_activ, apost_var_activ, \
apost_mean_inactiv, apost_var_inactiv, labels_activ, labels_inactiv):
'''
Function to calculate the probability that the NRL in voxel j,
condition m, exceeds a given threshold_value
'''
m1 = apost_mean_activ
sig1 = apost_var_activ
m2 = apost_mean_inactiv
sig2 = apost_var_inactiv
perc1 = labels_activ #proportion of samples drawn from the activ class
perc2 = labels_inactiv #proportion of samples drawn from the inactiv class
#posterior probability distribution
fmix = lambda t: perc1 * 1/np.sqrt(2*np.pi*sig1**2)*np.exp(- (t - m1)**2 / (2*sig1**2) ) + perc2 * 1/np.sqrt(2*np.pi*sig2**2)*np.exp(- (t - m2)**2 / (2*sig2**2) )
Proba = quad(fmix, threshold_value, float('inf'))[0]
return Proba
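# In words, the quantity returned above is (sketch, using the local names):
# Proba = integral from threshold_value to +infinity of
# perc1 * N(t; m1, sig1) + perc2 * N(t; m2, sig2) dt
# where N(t; m, s) is a Gaussian density with mean m and scale s, and perc1/perc2
# are the posterior label frequencies of the active/inactive classes in that voxel.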
def ThresholdPPM(proba_voxel, threshold_pval):
# keep the posterior probability only when it exceeds the threshold, else None
if proba_voxel > threshold_pval:
Proba = proba_voxel
else:
Proba = None
return Proba
def samplingWarmUp(self, variables):
"""
#TODO : comment
"""
# Precalculations and allocations :
smplHRF = variables[self.samplerEngine.I_HRF]
self.imm = self.samplerEngine.getVariable('beta').currentValue[0] < 0
self.varYtilde = np.zeros((self.ny, self.nbVox), dtype=np.float64)
self.aXh = np.empty((self.nbVox, self.ny, self.nbConditions), dtype=float)
self.vycArray = np.zeros((self.nbVox, self.ny, self.nbConditions))
self.sumaXh = np.zeros((self.ny, self.nbVox), dtype=float)
self.computeVarYTildeOpt(smplHRF.varXh)
self.varXhtQ = np.empty((self.nbConditions,self.ny),dtype=float)
self.varClassApost = np.zeros((self.nbClasses,self.nbConditions,self.nbVox),
dtype=np.float64)
self.sigClassApost = np.zeros((self.nbClasses,self.nbConditions,self.nbVox),
dtype=float)
self.meanClassApost = np.zeros((self.nbClasses,self.nbConditions,
self.nbVox), dtype=np.float64)
self.meanApost = np.zeros((self.nbConditions, self.nbVox), dtype=float)
self.sigApost = np.zeros((self.nbConditions, self.nbVox), dtype=float)
self.aa = np.zeros((self.nbConditions, self.nbConditions, self.nbVox),
dtype=float)
if self.imm:
self.sumRmatXhtQXh = np.zeros((self.nbConditions,self.nbVox),dtype=float)
self.varXjhtQjeji = np.empty((self.nbVox), dtype=float)
self.computeAA(self.currentValue, self.aa)
self.iteration = 0
def computeAA(self, nrls, destaa):
# aa[m,n,:] == aa[n,m,:] -> nb ops can be /2
for j in xrange(self.nbConditions):
for k in xrange(self.nbConditions):
np.multiply(nrls[j,:], nrls[k,:],
destaa[j,k,:])
def computeVarYTildeOpt(self, varXh):
# C function:
pyhrf.verbose(6, 'Calling C function computeYtilde ...')
computeYtilde(varXh, self.currentValue, self.dataInput.varMBY,
self.varYtilde, self.sumaXh)
#print 'sumaXh = ', self.sumaXh
#print 'varYtilde = ', self.varYtilde
#print 'Ytilde computing is finished ...'
pyhrf.verbose(5,'varYtilde %s' %str(self.varYtilde.shape))
pyhrf.verbose.printNdarray(5, self.varYtilde)
def sampleNextAlt(self, variables):
varXh = variables[self.samplerEngine.I_HRF].varXh
self.computeVarYTildeOpt(varXh)
def computeComponentsApost(self, variables, j, gTQg):
sIMixtP = variables[self.samplerEngine.I_MIXT_PARAM]
var = sIMixtP.getCurrentVars()
mean = sIMixtP.getCurrentMeans()
rb = variables[self.samplerEngine.I_NOISE_VAR].currentValue
varXh = variables[self.samplerEngine.I_HRF].varXh
nrls = self.currentValue
gTQgjrb = gTQg[j]/rb
if pyhrf.verbose.verbosity > 4:
print 'Current components:'
print 'mean CI = %f, var CI = %f' %(mean[self.L_CI,j], var[self.L_CI,j])
print 'mean CA = %f, var CA = %f' %(mean[self.L_CA,j], var[self.L_CA,j])
print 'gTQg =', gTQg[j]
pyhrf.verbose(6, 'gTQg[%d] %s:'%(j,str(gTQg[j].shape)))
pyhrf.verbose.printNdarray(6, gTQg[j])
pyhrf.verbose(6, 'rb %s :'%str(rb.shape))
pyhrf.verbose.printNdarray(6, rb)
pyhrf.verbose(6, 'gTQgjrb %s :'%str(gTQgjrb.shape))
pyhrf.verbose.printNdarray(6, gTQgjrb)
ej = self.varYtilde + nrls[j,:] \
* repmat(varXh[:,j],self.nbVox, 1).transpose()
pyhrf.verbose(6, 'varYtilde %s :'%str((self.varYtilde.shape)))
pyhrf.verbose.printNdarray(6, self.varYtilde)
pyhrf.verbose(6, 'nrls[%d,:] %s :'%(j,nrls[j,:]))
pyhrf.verbose.printNdarray(6, nrls[j,:])
pyhrf.verbose(6, 'varXh[:,%d] %s :'%(j,str(varXh[:,j].shape)))
pyhrf.verbose.printNdarray(6, varXh[:,j])
pyhrf.verbose(6, 'repmat(varXh[:,%d],self.nbVox, 1).transpose()%s:' \
%(j,str((repmat(varXh[:,j],self.nbVox, 1).transpose().shape))))
pyhrf.verbose.printNdarray(6, repmat(varXh[:,j],self.nbVox, 1).transpose())
pyhrf.verbose(6, 'ej %s :'%str((ej.shape)))
pyhrf.verbose.printNdarray(6, ej)
np.divide(np.dot(self.varXhtQ[j,:],ej), rb, self.varXjhtQjeji)
if pyhrf.verbose.verbosity > 5:
pyhrf.verbose(5, 'np.dot(self.varXhtQ[j,:],ej) %s :' \
%str(np.dot(self.varXhtQ[j,:],ej).shape))
pyhrf.verbose.printNdarray(5, np.dot(self.varXhtQ[j,:],ej))
pyhrf.verbose(5, 'self.varXjhtQjeji %s :' \
%str(self.varXjhtQjeji.shape))
pyhrf.verbose.printNdarray(5, self.varXjhtQjeji)
for c in xrange(self.nbClasses):
#print 'var[%d,%d] :' %(c,j), var[c,j]
#print 'mean[%d,%d] :' %(c,j), mean[c,j]
self.varClassApost[c,j,:] = 1./(1./var[c,j] + gTQgjrb)
if 0:
print 'shape of self.varClassApost[c,j,:] :', \
self.varClassApost.shape
#print 'varClassApost[%d,%d,:]:' %(c,j), self.varClassApost[c,j,:]
np.sqrt(self.varClassApost[c,j,:], self.sigClassApost[c,j,:])
if c > 0: # assume 0 stands for inactivating class
np.multiply(self.varClassApost[c,j,:],
np.add(mean[c,j]/var[c,j], self.varXjhtQjeji),
self.meanClassApost[c,j,:])
else:
np.multiply(self.varClassApost[c,j,:], self.varXjhtQjeji,
self.meanClassApost[c,j,:])
pyhrf.verbose(5, 'meanClassApost %d cond %d :'%(c,j))
pyhrf.verbose.printNdarray(5, self.meanClassApost[c,j,:])
pyhrf.verbose(5, 'varClassApost %d cond %d :'%(c,j))
pyhrf.verbose.printNdarray(5, self.varClassApost[c,j,:])
pyhrf.verbose(5, 'shape of self.varClassApost[c,j,:] : %s' \
%str(self.varClassApost.shape))
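# Summary of the per-class posterior computed above (class c, condition j, voxel i),
# using the names of the local variables in this method:
# varClassApost[c,j,i] = 1 / ( 1/var[c,j] + gTQg[j]/rb[i] )
# meanClassApost[c,j,i] = varClassApost[c,j,i] * ( mean[c,j]/var[c,j] + (X_j h)'Q e_j[i] / rb[i] )
# with mean[c,j] taken as 0 for the inactive class (c == 0), and e_j the residual
# varYtilde with condition j's contribution added back in.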
def computeVarXhtQ(self, h, varXQ):
for j in xrange(self.nbConditions):
self.varXhtQ[j,:] = np.dot(h,varXQ[j,:,:])
def sampleNrlsSerial(self, rb, h, varCI, varCA, meanCA ,
gTQg, variables):
pyhrf.verbose(3, 'Sampling Nrls (serial, spatial prior) ...')
pyhrf.verbose(3, 'Label sampling: ' + str(self.sampleLabelsFlag))
sIMixtP = variables[self.samplerEngine.I_MIXT_PARAM]
var = sIMixtP.getCurrentVars()
mean = sIMixtP.getCurrentMeans()
rb = variables[self.samplerEngine.I_NOISE_VAR].currentValue
# Add one dimension to be consistent with habituation model
varXh = np.array([variables[self.samplerEngine.I_HRF].varXh], dtype=np.float64)
nrls = self.currentValue
neighbours = self.dataInput.neighboursIndexes
beta = self.samplerEngine.getVariable('beta').currentValue
voxOrder = np.random.permutation(self.nbVox)
sampleSmmNrl2(voxOrder.astype(np.int32), rb.astype(np.float64),
neighbours.astype(np.int32), self.varYtilde,
self.labels, varXh, self.currentValue,
self.nrlsSamples.astype(np.float64),
self.labelsSamples.astype(np.float64),
np.array([self.varXhtQ]).astype(np.float64),
gTQg.astype(np.float64),
beta.astype(np.float64), mean.astype(np.float64),
var.astype(np.float64), self.meanClassApost,
self.varClassApost, self.nbClasses,
self.sampleLabelsFlag+0, self.iteration,
self.nbConditions)
if (self.varClassApost<=0).any():
raise Exception('Negative posterior variances!')
self.countLabels(self.labels, self.voxIdx, self.cardClass)
def printState(self, verboseLevel):
if pyhrf.verbose.verbosity >= verboseLevel:
for j in xrange(self.nbConditions):
#pyhrf.verbose(verboseLevel, 'All nrl cond %d:'%j)
#pyhrf.verbose.printNdarray(verboseLevel, self.currentValue[j,:])
pyhrf.verbose(verboseLevel, 'nrl cond %d = %1.3f(%1.3f)' \
%(j,self.currentValue[j,:].mean(),
self.currentValue[j,:].std()))
for c in xrange(self.nbClasses):
#pyhrf.verbose(verboseLevel, 'All nrl %s cond %d:' \
# %(self.CLASS_NAMES[c],j))
ivc = self.voxIdx[c][j]
#pyhrf.verbose.printNdarray(verboseLevel,
# self.currentValue[j,ivc])
pyhrf.verbose(verboseLevel, 'nrl %s cond %d = %1.3f(%1.3f)' \
%(self.CLASS_NAMES[c],j,
self.currentValue[j,ivc].mean(),
self.currentValue[j,ivc].std()))
def sampleNrlsParallel(self, varXh, rb, h, varLambda, varCI, varCA,
meanCA, gTQg, variables):
pyhrf.verbose(3, 'Sampling Nrls (parallel, no spatial prior) ...')
for j in xrange(self.nbConditions):
self.computeComponentsApost(variables, j, gTQg)
if self.sampleLabelsFlag:
pyhrf.verbose(3, 'Sampling labels - cond %d ...'%j)
self.sampleLabels(j, variables)
self.countLabels(self.labels, self.voxIdx, self.cardClass)
pyhrf.verbose(3,'Sampling labels done!')
pyhrf.verbose(6, 'All labels cond %d:'%j)
pyhrf.verbose.printNdarray(6, self.labels[j,:])
if self.trueLabels is not None:
pyhrf.verbose(6, 'All true labels cond %d:'%j)
pyhrf.verbose.printNdarray(6, self.trueLabels[j,:])
for c in xrange(self.nbClasses):
np.putmask(self.sigApost[j,:], self.labels[j,:]==c,
self.sigClassApost[c,j,:])
np.putmask(self.meanApost[j,:],self.labels[j,:]==c,
self.meanClassApost[c,j,:])
oldVal = self.currentValue[j,:]
np.add(np.multiply(self.nrlsSamples[j,:], self.sigApost[j,:]),
self.meanApost[j,:], self.currentValue[j,:])
self.computeVarYTildeOpt(varXh)
def sampleNextInternal(self, variables):
#TODO : comment
sIMixtP = variables[self.samplerEngine.I_MIXT_PARAM]
varCI = sIMixtP.currentValue[sIMixtP.I_VAR_CI]
varCA = sIMixtP.currentValue[sIMixtP.I_VAR_CA]
meanCA = sIMixtP.currentValue[sIMixtP.I_MEAN_CA]
rb = variables[self.samplerEngine.I_NOISE_VAR].currentValue
sHrf = variables[self.samplerEngine.I_HRF]
varXh = sHrf.varXh
h = sHrf.currentValue
self.nh = np.size(h)
varLambda = variables[self.samplerEngine.I_WEIGHTING_PROBA].currentValue
#Ytilde(:,i) = Ytilde(:,i) + ( CptStruct.nrl_old(j,i) - ...
# CptStruct.nrl(j,i)) * Xh(:,j);
pyhrf.verbose(5,'varXh %s :' %str(varXh.shape))
pyhrf.verbose.printNdarray(5, varXh)
self.computeVarYTildeOpt(varXh)
self.computeVarXhtQ(h, self.dataInput.matXQ)
pyhrf.verbose(6,'varXhtQ %s :' %str(self.varXhtQ.shape))
pyhrf.verbose.printNdarray(5, self.varXhtQ)
self.labelsSamples = np.random.rand(self.nbConditions, self.nbVox)
#print 'labelsSamples = ', self.labelsSamples
self.nrlsSamples = np.random.randn(self.nbConditions, self.nbVox)
gTQg = np.diag(np.dot(self.varXhtQ,varXh))
if self.imm:
self.sampleNrlsParallel(varXh, rb, h, varLambda, varCI,
varCA, meanCA, gTQg, variables)
else: #MMS
self.sampleNrlsSerial(rb, h, varCI, varCA, meanCA, gTQg, variables)
self.computeVarYTildeOpt(varXh)
if (self.currentValue >= 1000).any() and pyhrf.__usemode__ == pyhrf.DEVEL:
pyhrf.verbose(2, "Weird NRL values detected ! %d/%d" \
%((self.currentValue >= 1000).sum(),
self.nbVox*self.nbConditions) )
#pyhrf.verbose.set_verbosity(6)
if pyhrf.verbose.verbosity >= 4:
self.reportDetection()
self.computeAA(self.currentValue, self.aa)
self.printState(4)
self.iteration += 1 #TODO : factorize !!
#print 'nrl = ', self.currentValue
def reportDetection(self):
if self.trueLabels is not None:
try:
for j in xrange(self.nbConditions):
wrong = np.where(self.trueLabels[j,:] != self.labels[j,:])
print 'Nb of wrongly detected :', len(wrong[0])
if len(wrong[0]) > 0:
print 'False inactivating:'
for w in wrong[0]:
if self.trueLabels[j,w] != 0 and self.labels[j,w] == 0:
print 'it%04d-cond%02d-vox%03d : nrl = %f' \
%(self.iteration,j,w,self.currentValue[j,w])
print 'False activating:'
for w in wrong[0]:
if self.trueLabels[j,w] != 1 and self.labels[j,w] == 1:
print 'it%04d-cond%02d-vox%03d : nrl = %f' \
%(self.iteration,j,w,self.currentValue[j,w])
if self.nbClasses == 3:
print 'False deactivating:'
for w in wrong[0]:
if self.trueLabels[j,w] != 2 and self.labels[j,w] == 2:
print 'it%04d-cond%02d-vox%03d : nrl = %f' \
%(self.iteration,j,w,self.currentValue[j,w])
except Exception:
# may happen if nb conditions in simulation != nb conditions
# when estimating
pass
def calcFracLambdaTilde(self, cond, c1, c2, variables):
sMixtP = variables[self.samplerEngine.I_MIXT_PARAM]
sWeightP = variables[self.samplerEngine.I_WEIGHTING_PROBA]
varLambda = sWeightP.currentValue
var = sMixtP.getCurrentVars()
means = sMixtP.getCurrentMeans()
if self.samplerEngine.getVariable('beta').currentValue[cond] <= 0:
ratio = ( varLambda[c1] * var[c2]**0.5 ) \
/(varLambda[c2] * var[c1]**0.5 )
else:
ratio = (var[c2]/var[c1])**0.5
return ratio[cond] * ( self.sigClassApost[c1,cond,:] \
/self.sigClassApost[c2,cond,:] ) * \
np.exp(0.5*(self.meanClassApost[c1,cond,:]**2 \
/self.varClassApost[c1,cond,:] \
-self.meanClassApost[c2,cond,:]**2\
/self.varClassApost[c2,cond,:] \
- means[c1, cond]**2 \
/ var[c1, cond] \
+ means[c2, cond]**2 \
/ var[c2, cond] \
)\
)
# def calcFracLambdaTilde
def sampleLabels(self, cond, variables):
fracLambdaTilde = self.calcFracLambdaTilde(cond, self.L_CI, self.L_CA,
variables)
varLambdaApost = 1./(1.+fracLambdaTilde)
self.labels[cond,:] = self.labelsSamples[cond,:]<=varLambdaApost
if pyhrf.verbose.verbosity > 6:
for i in xrange(self.nbVox):
print 'it%04d-cond%02d-Vox%03d ...' %(self.iteration,cond,i)
print 'mApostCA =', self.meanClassApost[self.L_CA,cond,i],
print 'mApostCI =', self.meanClassApost[self.L_CI,cond,i]
print 'sApostCA =', self.sigClassApost[self.L_CA,cond,i],
print 'sApostCI =', self.sigClassApost[self.L_CI,cond,i]
print 'rl_I_A =', fracLambdaTilde[i]
print 'lambda Apost CA =', varLambdaApost[i]
print 'random =', self.labelsSamples[cond,i]
print '-> labels = ', self.labels[cond,i]
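# Sketch of the draw above, in the notation of this method: the posterior probability
# of the active class is
# P(label = L_CA | rest) = 1 / (1 + fracLambdaTilde) = varLambdaApost
# and labels[cond, i] is set to 1 (active) whenever the uniform sample
# labelsSamples[cond, i] falls at or below that probability.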
def getFinalLabels(self, thres=None):
#def getFinalLabels(self, thres):
# take the argmax over classes
return threshold_labels(self.meanLabels)
#return threshold_labels(self.meanLabels, thres)
#print self.cumulContrast
def computeContrasts(self):
#print self.contrasts_calc.iterkeys()
pyhrf.verbose(2, 'computeContrasts ...')
#print '~~~~~~~~~ self.cumulContrast :'
#print self.cumulContrast_Lc_Rc
# compute final contrasts:
self.contrasts = {}
self.contrastsVar = {}
nit = self.nbItObservables
pyhrf.verbose(5,'contrasts dict: %s'
%str(self.contrasts_calc))
for name in self.contrasts_calc.iterkeys():
#print 'name contraste: ', name
self.contrasts[name] = self.cumulContrast[name]/nit
self.contrastsVar[name] = self.cumul2Contrast[name]/nit - \
self.contrasts[name]**2
#print 'Les contrastes', self.contrasts
#print 'Les variances', self.contrastsVar
#print self.contrasts_calc.iterkeys
if self.wip_variance_computation and \
('calculaudio' in self.dataInput.cNames):
#for c1 in self.cumulContrast.iterkeys():
#self.contrasts[c1] = {}
#self.contrastsVar[c1] = {}
#for c2 in self.cumulContrast[c1].iterkeys():
#self.contrasts[c1][c2] = self.cumulContrast[c1][c2] \
#/ self.nbItObservables
#self.contrastsVar[c1][c2] = self.cumul2Contrast[c1][c2] \
#/ self.nbItObservables \
#- self.contrasts[c1][c2]**2
#print "COntrastsss!!"
#print self.cumulContrast_Lc_Rc
#Lc-Rc
self.contrast_Lc_Rc = self.cumulContrast_Lc_Rc / self.nbItObservables
self.contrast_var_Lc_Rc = self.cumul2Contrast_Lc_Rc / self.nbItObservables - self.contrast_Lc_Rc**2
#V-A
self.contrast_V_A = self.cumulContrast_V_A / self.nbItObservables
self.contrast_var_V_A = self.cumul2Contrast_V_A / self.nbItObservables - self.contrast_V_A**2
#C-S
self.contrast_C_S = self.cumulContrast_C_S / self.nbItObservables
self.contrast_var_C_S = self.cumul2Contrast_C_S / self.nbItObservables - self.contrast_C_S**2
#C-S_A
self.contrast_C_S_A = self.cumulContrast_C_S_A / self.nbItObservables
self.contrast_var_C_S_A = self.cumul2Contrast_C_S_A / self.nbItObservables - self.contrast_C_S_A**2
#print '%%%%%% contrast :',
#print self.contrasts
#For the variances estimates study: two conditions case
#A/II 1/ Independent conditions
#A/II 2/ Recovery of the a posteriori variance for the good runs
if 0:
print 'self.varcontrast_cond_both_classes shape:', self.varcontrast_cond_both_classes.shape
print 'self.varCon_2cond_indep_masked shape:', self.varCon_2cond_indep_masked.shape
print 'shapes Covar and diff_nrl_mean:', self.Covar_non_masked.shape, self.diff_nrl_mean_non_masked.shape, (self.diff_nrl_mean_non_masked[0,:]*self.diff_nrl_mean_non_masked[1,:]).shape
print 'shape Covar_masked', self.Covar_masked.shape
#print 'Concerning contrasts: self.contrasts_calc.iteritems(), self.contrasts_calc[0]: ', self.contrasts_calc.keys()[0]
print 'self.nbConditions:', self.nbConditions
#print 'shapes self.contrastsVar, self.Covar_non_masked:', self.contrastsVar.shape,
self.Covar_non_masked[:] = self.diff_nrl_mean_non_masked[0,:]*self.diff_nrl_mean_non_masked[1,:]
#print 'self.contrastsVar[self.contrasts_calc.keys()[0]]', self.contrastsVar[self.contrasts_calc.keys()[0]]
print 'self.contrasts_calc:'
print self.contrasts_calc
if 0:
self.varCon_2cond_corr_non_masked = self.contrastsVar[self.contrasts_calc.keys()[0]] + self.contrastsVar[self.contrasts_calc.keys()[1]] -2*self.Covar_non_masked #works only in this case of 2cond and contrast = Cond1-Cond2
if 0:
print '##################'
print 'self.finalLabels[0,310:316] - 1st condition:',
print self.finalLabels[0,310:316]
print 'self.varcontrast_cond_both_classes[0,0,310:316] - 1st condition, inactive class:',
print self.varcontrast_cond_both_classes[0,0,310:316]
print 'self.varcontrast_cond_both_classes[1,0,310:316] - 1st condition, active class:',
print self.varcontrast_cond_both_classes[1,0,310:316]
print '--'
print 'self.finalVariances[0,310:316]:',
print self.finalVariances[0,310:316]
print '--'
print 'self.final_mean_var_a_post[0,310:316] - 1st condition:',
print self.final_mean_var_a_post[0,310:316]
print ' ######'
print 'self.finalLabels[1,310:316] - 2nd condition:',
print self.finalLabels[1,310:316]
print 'self.varcontrast_cond_both_classes[0,1,310:316] - 2nd condition, inactive class:',
print self.varcontrast_cond_both_classes[0,1,310:316]
print 'self.varcontrast_cond_both_classes[1,1,310:316] - 2nd condition, active class:',
print self.varcontrast_cond_both_classes[1,1,310:316]
print '--'
print 'self.finalVariances[1,310:316]:',
print self.finalVariances[1,310:316]
print '--'
print 'self.final_mean_var_a_post[1,310:316] - 2nd condition:',
print self.final_mean_var_a_post[1,310:316]
if (self.nbConditions > 1):
pyhrf.verbose(1, 'Computing Contrasts ...')
for j in xrange(self.nbVox):
#First case: labels at 0 for both conditions
if (self.finalLabels[0,j]==0) & (self.finalLabels[1,j]==0):
#case independent
self.varCon_2cond_indep_masked[0,j] = \
self.varcontrast_cond_both_classes[0,0,j] + \
self.varcontrast_cond_both_classes[0,1,j]
self.varCon_2cond_indep_apost[0,j] = \
self.mean_var_apost[0,0,j] + self.mean_var_apost[0,1,j]
#case correlation
self.Covar_masked[0,j] = \
self.diff_nrl_mean_masked[0,0,j] * \
self.diff_nrl_mean_masked[0,1,j]
self.varCon_2cond_corr_masked[0,j] = \
self.varCon_2cond_indep_masked[0,j] - \
2*self.Covar_masked[0,j]
self.varCon_2cond_corr_apost[0,j] = \
self.mean_var_apost[0,0,j] + \
self.mean_var_apost[0,1,j] - 2*self.Covar_masked[0,j]
#Second case: labels at 0 for cond1 and at 1 for cond2
elif (self.finalLabels[0,j]==0) & (self.finalLabels[1,j]==1):
#case independent
self.varCon_2cond_indep_masked[1,j] = \
self.varcontrast_cond_both_classes[0,0,j] + \
self.varcontrast_cond_both_classes[1,1,j]
self.varCon_2cond_indep_apost[1,j] = \
self.mean_var_apost[0,0,j] + self.mean_var_apost[1,1,j]
#case correlation
self.Covar_masked[1,j] = self.diff_nrl_mean_masked[0,0,j] * \
self.diff_nrl_mean_masked[1,1,j]
self.varCon_2cond_corr_masked[1,j] = \
self.varCon_2cond_indep_masked[1,j] - \
2*self.Covar_masked[1,j]
self.varCon_2cond_corr_apost[1,j] = \
self.mean_var_apost[0,0,j] + \
self.mean_var_apost[1,1,j] - 2*self.Covar_masked[1,j]
#Third case: labels at 1 for cond1 and at 0 for cond2
elif (self.finalLabels[0,j]==1) & (self.finalLabels[1,j]==0):
#case independent
self.varCon_2cond_indep_masked[2,j] = \
self.varcontrast_cond_both_classes[1,0,j] + \
self.varcontrast_cond_both_classes[0,1,j]
self.varCon_2cond_indep_apost[2,j] = \
self.mean_var_apost[1,0,j] + self.mean_var_apost[0,1,j]
#case correlation
self.Covar_masked[2,j] = self.diff_nrl_mean_masked[1,0,j] * \
self.diff_nrl_mean_masked[0,1,j]
self.varCon_2cond_corr_masked[2,j] = \
self.varCon_2cond_indep_masked[2,j] - \
2*self.Covar_masked[2,j]
self.varCon_2cond_corr_apost[2,j] = \
self.mean_var_apost[1,0,j] + \
self.mean_var_apost[0,1,j] - \
2*self.Covar_masked[2,j]
#Fourth case: labels at 1 for both conditions
elif (self.finalLabels[0,j]==1) & (self.finalLabels[1,j]==1):
                    #case independent
self.varCon_2cond_indep_masked[3,j] = \
self.varcontrast_cond_both_classes[1,0,j] + \
self.varcontrast_cond_both_classes[1,1,j]
self.varCon_2cond_indep_apost[3,j] = \
self.mean_var_apost[1,0,j] + self.mean_var_apost[1,1,j]
#case correlation
self.Covar_masked[3,j] = self.diff_nrl_mean_masked[1,0,j] * \
self.diff_nrl_mean_masked[1,1,j]
self.varCon_2cond_corr_masked[3,j] = \
self.varCon_2cond_indep_masked[3,j] - \
2*self.Covar_masked[3,j]
self.varCon_2cond_corr_apost[3,j] = \
self.mean_var_apost[1,0,j] + \
self.mean_var_apost[1,1,j] - 2*self.Covar_masked[3,j]
#print 'finalVariances: ', self.finalVariances, \
# self.finalVariances.shape
#TOFIX !!
# for contrast,conds in self.conds_in_contrasts.iteritems():
# iconds = [cnames.index(c) for c in conds]
# cov = np.vstack([self.Covar[cpl[0],cpl[1],:] \
# for cpl in couples(iconds)]).sum(0)
# self.contrast_var[contrast] = \
# self.varcontrast_singleton[mask_class, icond, :].sum(1) + cov
else:
            pyhrf.verbose(1, 'Only one condition: no '\
                          'contrast computation ...')
if 0:
print '##################'
print 'self.mean_var_apost', self.mean_var_apost[1,0,:]
print 'self.mean_var_apost', self.mean_var_apost[1,1,:]
print '--'
            print 'self.varCon_2cond_indep_masked[0,310:316]: - case 1',
            print self.varCon_2cond_indep_masked[0,310:316]
            print 'self.varCon_2cond_indep_masked[1,310:316]: - case 2',
            print self.varCon_2cond_indep_masked[1,310:316]
            print 'self.varCon_2cond_indep_masked[2,310:316]: - case 3',
            print self.varCon_2cond_indep_masked[2,310:316]
            print 'self.varCon_2cond_indep_masked[3,310:316]: - case 4',
print self.varCon_2cond_indep_masked[3,310:316]
print '--'
            print 'self.varCon_2cond_indep_apost[0,310:316]: - case 1',
            print self.varCon_2cond_indep_apost[0,310:316]
            print 'self.varCon_2cond_indep_apost[1,310:316]: - case 2',
            print self.varCon_2cond_indep_apost[1,310:316]
            print 'self.varCon_2cond_indep_apost[2,310:316]: - case 3',
            print self.varCon_2cond_indep_apost[2,310:316]
            print 'self.varCon_2cond_indep_apost[3,310:316]: - case 4',
print self.varCon_2cond_indep_apost[3,310:316]
print '-----'
print 'Positions different from zeros:'
print 'self.mean_var_apost >0:', np.where(self.mean_var_apost>0)
def get_final_summary(self):
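        # Extend the generic summary with per-condition counts of the final labels,
        # the same table for the true labels (when available) and the per-class
        # misclassification rate (in percent).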
s = GibbsSamplerVariable.get_final_summary(self)
vi = [range(self.nbConditions) for c in xrange(self.nbClasses)]
cc = np.zeros((self.nbClasses, self.nbConditions), dtype=int)
s += ' labels sampling report: \n'
self.countLabels(self.finalLabels, vi, cc)
sv = get_2Dtable_string(cc.T, self.dataInput.cNames, self.CLASS_NAMES,
precision=0)
if '\n' in sv:
s += ' - final labels:\n' + sv
else:
s += ' - final labels: ' + sv + '\n'
if self.trueLabels is not None:
#nlabs = len(unique(self.trueLabels))
nlabs = self.nbClasses
vi = [range(self.nbConditions) for c in xrange(nlabs)]
cc = np.zeros((nlabs, self.nbConditions), dtype=int)
self.countLabels(self.trueLabels, vi, cc)
if nlabs <= self.nbClasses:
sv = get_2Dtable_string(cc.T, self.dataInput.cNames,
self.CLASS_NAMES[:nlabs], precision=0)
else:
cn = self.CLASS_NAMES+['C%d'%c for c in xrange(self.nbClasses,
nlabs+1)]
sv = get_2Dtable_string(cc.T, self.dataInput.cNames, cn, precision=0)
if '\n' in sv:
s += ' - true labels:\n' + sv
else:
s += ' - true labels: ' + sv + '\n'
if self.trueLabels.shape == self.finalLabels.shape:
#TODO: adapt to cases where nbClasses differ btw true and estim
errorRate = np.zeros((self.nbConditions, self.nbClasses))
for j in xrange(self.nbConditions):
for c in xrange(self.nbClasses):
# select which true labels are not in the
# considered class:
tlnotc = (self.trueLabels[j,:] != c)
# select which estimated labels are in the
# considered class:
flc = (self.finalLabels[j,:] == c)
# select which labels are classified in the
# considered class and were not truely in this class:
diffs = np.bitwise_and(flc,tlnotc)
errorRate[j,c] = diffs.sum()*100. / cc[c,j]
sv = get_2Dtable_string(errorRate, self.dataInput.cNames,
self.CLASS_NAMES,precision=1)
if '\n' in sv:
s += ' - error (percent):\n' + sv
else:
s += ' - error (percent): ' + sv + '\n'
return s
def cleanMemory(self):
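        # Downcast the posterior summaries kept for outputs to float32 and delete
        # the large temporary arrays used only during sampling.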
self.meanClassApost = self.meanClassApost.astype(np.float32)
self.varClassApost = self.varClassApost.astype(np.float32)
self.meanLabels = self.meanLabels.astype(np.float32)
self.freq_above_thresh = self.freq_above_thresh.astype(np.float32)
self.freq_above_Multi_thresh = self.freq_above_Multi_thresh.astype(np.float32)
# clean memory of temporary variables :
if self.imm:
del self.sumRmatXhtQXh
del self.varXjhtQjeji
# del self.varClassApost
# del self.meanClassApost
del self.sigClassApost
del self.sigApost
del self.meanApost
if hasattr(self, 'aa'):
del self.aa
if hasattr(self, 'aXh'):
del self.aXh
if hasattr(self, 'varYtilde'):
del self.varYtilde
if hasattr(self, 'varXhtQ'):
del self.varXhtQ
if hasattr(self, 'sumaXh'):
del self.sumaXh
if hasattr(self, 'vycArray'):
del self.vycArray
if hasattr(self,'labelsSamples'):
del self.labelsSamples
if hasattr(self,'nrlsSamples'):
del self.nrlsSamples
#del self.corrEnergies
del self.labels
del self.voxIdx
if self.wip_variance_computation:
del self.saveNRL
if not self.wip_variance_computation: #and self.computeContrastsFlag:
del self.cumulLabels
#del self.mean_both_classes_cond
#del self.mean_mean_apost
#del self.mean_var_apost
# del self.cumul2Contrast_C_S_A
# del self.cumul2Contrast_C_S
# del self.cumul2Contrast_Lc_Rc
# del self.cumul2Contrast_V_A
# del self.cumulContrast_C_S_A
# del self.cumulContrast_Lc_Rc
# del self.cumulContrast_V_A
# del self.cumulContrast_C_S
del self.cumul_mean_apost
del self.cumul_var_apost
#del self.diff_nrl_mean_masked
#del self.diff_nrl_mean_non_masked
del self.final_mean_var_a_post
# del self.varCon_2cond_corr_apost
# del self.varCon_2cond_corr_masked
del self.varCon_2cond_indep_apost
del self.varCon_2cond_indep_masked
# del self.varcontrast_cond_both_classes
del self.finalVariances
del self.sum_nrls_carr_both_classes_cond
del self.sum_nrls_both_classes_cond
# del self.Covar_non_masked
# del self.Covar_masked
def markWrongLabels(self, labels):
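        # Recode labels in place by comparison with the true labels: correct
        # activations stay L_CA, correct non-activations stay L_CI, and
        # mismatches are marked as FALSE_POS / FALSE_NEG.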
        if self.trueLabels is not None:
for j in xrange(self.nbConditions):
#print 'labels :'
#print labels[j,:]
#print 'trueLabels '
#print self.trueLabels[j,:]
el = (labels[j,:] == self.L_CA)
tl = (self.trueLabels[j,:] == self.L_CA)
nel = np.bitwise_not(el)
ntl = np.bitwise_not(tl)
labels[j, np.bitwise_and(el,tl)] = self.L_CA
labels[j, np.bitwise_and(nel,ntl)] = self.L_CI
labels[j, np.bitwise_and(el,ntl)] = self.FALSE_POS
labels[j, np.bitwise_and(nel,tl)] = self.FALSE_NEG
#print '-> marked :'
#print labels[j,:]
def finalizeSampling(self):
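        # Post-processing after the last iteration: threshold the posterior label
        # frequencies into final labels, correct the HRF sign and scale ambiguities
        # on the NRL estimates, optionally compute contrast variances, and check
        # the result against the true labels when requested.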
GibbsSamplerVariable.finalizeSampling(self)
self.finalLabels = self.getFinalLabels()
#self.finalLabels = self.getFinalLabels(0.8722)
#self.markWrongLabels(self.finalLabels)
#print 'finalLabels.shape', self.finalLabels.shape
smplHRF = self.samplerEngine.getVariable('hrf')
# Correct sign ambiguity :
if hasattr(smplHRF, 'detectSignError'):
sign_error = smplHRF.detectSignError()
pyhrf.verbose(2, 'sign error - Flipping nrls')
self.finalValue_sign_corr = self.finalValue * (1-2*sign_error)
# Correct hrf*nrl scale ambiguity :
scaleF = smplHRF.getScaleFactor()
# Use HRF amplitude :
pyhrf.verbose(3, 'scaleF=%1.2g' %scaleF)
pyhrf.verbose(3, 'self.finalValue : %1.2g - %1.2g' \
%(self.finalValue.min(), self.finalValue.max()))
self.finalValueScaleCorr = self.finalValue * scaleF
if self.computeContrastsFlag and self.wip_variance_computation:
            #Work on variance estimates
            #self.masked_var_cond
            ########
            #np.seterr(all='ignore') # ignore warnings when nan is generated or when there is a divide by zero; some parts of cumulLabels are equal
            # to zero so we get nan in "self.mean_both_classes_cond"
########
#print 'sum_nrls_both_classes_cond :',self.sum_nrls_both_classes_cond
#print 'cumulLabels :', self.cumulLabels
self.mean_both_classes_cond = self.sum_nrls_both_classes_cond / self.cumulLabels
#print 'mean_both_classes_cond :', self.mean_both_classes_cond
#self.mean_class_inactiv_cond = self.sum_nrls_class_inactiv_cond / self.cumulLabels[1,0,:]
self.varcontrast_cond_both_classes = self.sum_nrls_carr_both_classes_cond / self.cumulLabels - (self.mean_both_classes_cond)**2
#self.varcontrast_cond_class_inactiv = self.sum_nrls_carr_class_inactiv_cond / self.cumulLabels[1,0,:] - (self.mean_class_inactiv_cond)**2
#print 'self.mean_class_activ_cond', self.mean_class_activ_cond, self.mean_class_activ_cond.shape
#print 'self.varcontrast_cond_class_activ', self.varcontrast_cond_class_activ, self.varcontrast_cond_class_activ.shape
#print 'shapes:', self.sum_nrls_carr_both_classes_cond.shape, self.cumulLabels[1,0,:].shape, self.mean_both_classes_cond.shape, self.currentValue.shape
#print 'self.finalVariances.shape:', self.finalVariances.shape
#print 'self.varcontrast_cond_class_activ.shape', self.varcontrast_cond_both_classes.shape
for icond in xrange(self.nbConditions):
for j in xrange(self.nbVox):
if self.finalLabels[icond,j]==1:
self.finalVariances[icond,j] = self.varcontrast_cond_both_classes[1,icond,j]
self.final_mean_var_a_post[icond,j] = self.mean_var_apost[1,icond,j]
elif self.finalLabels[icond,j]==0:
self.finalVariances[icond,j] = self.varcontrast_cond_both_classes[0,icond,j]
self.final_mean_var_a_post[icond,j] = self.mean_var_apost[0,icond,j]
#print 'Comparisons between self.varcontrast_cond_both_classes and self.finalVariances:'
#print self.varcontrast_cond_both_classes, self.finalVariances
#print 'self.finalLabels', self.finalLabels
#print 'tests end for condition1: '
#print 'Variances for class activ: self.varcontrast_cond_both_classes[c=1, cond=0, 310:316]:'
#print self.varcontrast_cond_both_classes[1, 0, 310:316]
#print 'Variances for class inactiv: self.varcontrast_cond_both_classes[c=0, cond=0, 310:316]:'
#print self.varcontrast_cond_both_classes[0, 0, 310:316]
#print 'Final variances for condition1: self.finalVariances[cond=0, 310:316]:'
#print self.finalVariances[0, 310:316]
#print '##################'
#print 'tests for condition 2:'
#print 'Variances for class activ: self.varcontrast_cond_both_classes[c=1, cond=1, 310:316]:'
#print self.varcontrast_cond_both_classes[1, 1, 310:316]
#print 'Variances for class inactiv: self.varcontrast_cond_both_classes[c=0, cond=1, 310:316]:'
#print self.varcontrast_cond_both_classes[0, 1, 310:316]
#print 'Final variances for condition1: self.finalVariances[cond=1, 310:316]:'
#print self.finalVariances[1, 310:316]
#print 'Non zeros positions for self.mean_both_classes_cond :'
#print np.where(self.mean_both_classes_cond>0)
#print 'Non zeros for self.sum_nrls_carr_both_classes_cond :'
#print np.where(self.sum_nrls_carr_both_classes_cond>0)
#print 'Non zeros for self.sum_nrls_both_classes_cond :'
#print np.where(self.sum_nrls_both_classes_cond>0)
#print 'Non zeros positions for self.varcontrast_cond_both_classes :'
#print np.where(self.varcontrast_cond_both_classes>0)
#print 'To compare:', (self.sum_nrls_carr_class_activ_cond / self.cumulLabels[1,0,:])[169], (self.mean_class_activ_cond**2)[169]
# -------------- For contrast variance analysis -------------------------------
#print 'self.finalLabels infos :', np.where(self.finalLabels==1), np.where(self.finalLabels==0)
if 0:
print '###############################################'
print 'Verification concerning the division of 3D numpy array in python:'
print 'shapes:', self.mean_both_classes_cond.shape, self.sum_nrls_both_classes_cond.shape, self.cumulLabels.shape, self.varcontrast_cond_both_classes.shape
print 'self.mean_both_classes_cond[0,1,300:306]:', self.mean_both_classes_cond[0,1,300:306]
print 'self.sum_nrls_both_classes_cond[0,1,300:306]:', self.sum_nrls_both_classes_cond[0,1,300:306]
print 'self.cumulLabels[0,1,300:306]:', self.cumulLabels[0,1,300:306]
print 'self.sum_nrls_carr_both_classes_cond[0,1,300:306]:', self.sum_nrls_carr_both_classes_cond[0,1,300:306]
print 'self.varcontrast_cond_both_classes[0,1,300:306]:', self.varcontrast_cond_both_classes[0,1,300:306]
if self.computeContrastsFlag:
self.computeContrasts()
#self.cleanMemory()
if self.samplerEngine.check_ftval is not None:
if self.trueLabels is None:
pyhrf.verbose(4, 'Warning: no true labels to check against')
elif self.sampleLabels:
fv = self.finalLabels
tv = self.trueLabels
diffs = (fv != tv)
delta = diffs.sum()*1. / fv.shape[1]
if delta > 0.05:
m = "Final value of labels is not close to " \
"true value.\n -> %%diffs: %1.2f\n" \
" Final value:\n %s\n True value:\n %s\n" \
%(delta, str(fv), str(tv))
if self.samplerEngine.check_ftval == 'raise':
raise Exception(m)
elif self.samplerEngine.check_ftval == 'print':
print '\n'.join(['!! '+ s for s in m.split('\n')])
self.compute_summary_stats()
def getRocData(self, dthres=0.005):
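        # Build ROC curves against the simulated labels: sweep probability
        # thresholds, re-threshold the posterior label means, count TP/TN/FP/FN
        # per condition and resample (sensitivity, 1-specificity) on a regular grid.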
if self.trueLabels is not None:
thresholds = arange(0,1/dthres) * dthres
oneMinusSpecificity = np.zeros((self.nbConditions, len(thresholds)))
sensitivity = np.zeros((self.nbConditions, len(thresholds)))
for it,thres in enumerate(thresholds):
labs = threshold_labels(self.meanLabels,thres)
self.markWrongLabels(labs)
for cond in xrange(self.nbConditions):
if 1 and self.dataInput.cNames[cond] == 'audio':
print "**cond %d **" %cond
print 'marked labels:'
print labs[cond,:]
print 'simulated labels:'
print self.dataInput.simulData.nrls.labels[cond,:]
counts = bincount(labs[cond,:])
nbTrueNeg = counts[0]
nbTruePos = counts[1] if len(counts)>1 else 0
fp = self.FALSE_POS
nbFalsePos = counts[fp] if len(counts)>fp else 0
fn = self.FALSE_NEG
nbFalseNeg = counts[fn] if len(counts)>fn else 0
if 1 and self.dataInput.cNames[cond] == 'audio':
print 'TN :', nbTrueNeg
print 'TP :', nbTruePos
print 'FP :', nbFalsePos
print 'FN :', nbFalseNeg
if nbTruePos == 0:
sensitivity[cond,it] = 0
else:
sensitivity[cond,it] = nbTruePos / \
(nbTruePos+nbFalseNeg+0.0)
spec = 1-nbTrueNeg/(nbTrueNeg+nbFalsePos+0.0)
oneMinusSpecificity[cond,it] = spec
if 1 and self.dataInput.cNames[cond] == 'audio':
print '-> se = ', sensitivity[cond, it]
print '-> 1-sp = ', oneMinusSpecificity[cond,it]
spGrid = arange(0.,1.,0.01)
omspec = np.zeros((self.nbConditions, len(spGrid)))
sens = np.zeros((self.nbConditions, len(spGrid)))
for cond in xrange(self.nbConditions):
order = argsort(oneMinusSpecificity[cond,:])
if oneMinusSpecificity[cond,order][0] != 0.:
osp = np.concatenate(([0.],oneMinusSpecificity[cond,order]))
se = np.concatenate(([0.],sensitivity[cond,order]))
else:
osp = oneMinusSpecificity[cond,order]
se = sensitivity[cond,order]
if osp[-1] != 1.:
osp = np.concatenate((osp,[1.]))
se = np.concatenate((se,[1.]))
sens[cond,:] = resampleToGrid(osp, se, spGrid)
omspec[cond, :] = spGrid
if 1 and self.dataInput.cNames[cond] == 'audio':
print '-> se :'
print sens[cond,:]
print 'spec grid :'
print spGrid
return sens, omspec
else:
return None, None
def compute_summary_stats(self):
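        # Gather the posterior probability maps (PPM) computed online during
        # sampling into self.stats; the alternative PPM definitions below are
        # kept commented out.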
self.stats = {}
pyhrf.verbose(4, 'Compute PPM outputs ...')
vthresh = getattr(self, 'ppm_value_thresh', 0)
if hasattr(self, 'count_above_thresh'):
ppm_mcmc = self.freq_above_thresh
ppm_tag = 'PPM_g_MCMC'
self.stats[ppm_tag] = ppm_mcmc
ppm_mcmc = self.freq_above_Multi_thresh
ppm_tag = 'PPM_g_MCMC_MultiThresh'
self.stats[ppm_tag] = ppm_mcmc
#if self.smplHistory is not None:
##quant = [1-self.ppm_proba_thresh]
#ppm_mcmc = np.zeros_like(self.finalValue)
#for j in range(self.nbConditions):
## ppm_mcmc[j,:] = mquantiles(self.smplHistory[its,j,:],
## prob=quant, axis=0)
#ppm_mcmc[j,:] = cpt_ppm_a_mcmc(self.smplHistory[self.samplerEngine.nbSweeps:,j,:],
#self.ppm_proba_thresh)
#ppm_tag = 'PPM_a_MCMC'
#self.stats[ppm_tag] = ppm_mcmc
#if hasattr(self, 'meanClassApost'):
#from pyhrf.stats import gm_cdf
#mci = self.meanClassApost[self.L_CI,:,:]
#vci = self.varClassApost[self.L_CI,:,:]
#pci = self.meanLabels[self.L_CI,:,:]
#mca = self.meanClassApost[self.L_CA,:,:]
#vca = self.varClassApost[self.L_CA,:,:]
#pca = self.meanLabels[self.L_CA,:,:]
## PPM as sf(thresh) of \sum_i \Nc(m_apost_i, v_apost_i)
#ppm_tag = 'PPM_g_apost'
#ppm_nrls = np.zeros_like(self.finalValue)
#for i in xrange(self.nbConditions):
##avoid underflow errors in pdf computation:
#v = self.varClassApost[:,i,:].astype(np.float64)
#ppm_nrls[i,:] = 1 - gm_cdf(vthresh,
#self.meanClassApost[:,i,:],
#v,
#self.meanLabels[:,i,:])
#self.stats[ppm_tag] = ppm_nrls
## PPM as inv_cdf(1-ppm_proba) of \sum_c \Nc(m_apost_c, v_apost_c)
## TODO ! -> need numerical computation (-> loop over voxels...)
#from scipy.stats import norm
##PPM as isf(proba) of \Nc(mean_MCMC, var_MCMC)
#output_name = 'PPM_a_norm_online'
#pthresh = getattr(self, 'ppm_proba_thresh', 0.05)
#ppm_empirical_pt = norm.isf(pthresh, self.finalValue,
#self.error**.5)
#self.stats[output_name] = ppm_empirical_pt
##PPM as sf(thresh) of \Nc(mean_MCMC, var_MCMC)
#output_name = 'PPM_g_norm_online'
#if self.error.size != 1:
#self.error[np.where(self.error==0.)] = 1e-6
#x = ((vthresh-self.finalValue)/self.error**.5).clip(-20,10)
## for ls,es in zip(self.finalValue, self.error**.5):
## for l,e in zip(ls,es):
#ppm_empirical_vt = norm.sf(x)
#else:
#pyhrf.verbose(1, 'error is empty and thus put to 0')
#ppm_empirical_vt=0
#pyhrf.verbose(1, 'Warning ppm_empiricall_vt put to 0')
#self.stats[output_name] = ppm_empirical_vt
##PPM as isf(proba) of \Nc(mean_c, var_c) with c=argmax(labels)
#output_name = 'PPM_a_norm_max_q'
#argmax_labels = np.argmax(self.meanLabels,0)
#mu_q_max = np.zeros_like(self.finalValue)
#var_q_max = np.zeros_like(self.finalValue)
#for i in range(self.nbClasses):
#m = np.where(argmax_labels==i)
#mu_q_max[m] = self.meanClassApost[i,m[0],m[1]]
#var_q_max[m] = self.varClassApost[i,m[0],m[1]]
#var_q_max[np.where(var_q_max==0)] = 1e-10
#ppm_napprox_pt = norm.isf(pthresh, mu_q_max, var_q_max**.5)
#self.stats[output_name] = ppm_napprox_pt
##PPM as sf(thresh) of \Nc(mean_c, var_c) with c=argmax(labels)
#output_name = 'PPM_g_norm_max_q'
#ppm_napprox_vt = norm.sf(vthresh, loc=mu_q_max,
#scale=var_q_max**.5)
#self.stats[output_name] = ppm_napprox_vt
## pvalue for H_0: A=0 and A ~ \Nc(0, var_c) with c=argmax(labels)
#output_name = 'pval_max_q'
#x = (self.finalValue/var_q_max**.5).clip(-20,10)
#pval = norm.sf(x)
#self.stats[output_name] = pval
## pvalue for H_0: A=0 and A ~ \Nc(0, var_MCMC)
#output_name = 'pval_online'
#x = (self.finalValue/self.error**.5).clip(-20,10)
#pval = norm.sf(x)
#self.stats[output_name] = pval
def getClassifRate(self):
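        # Per-class classification rate: fraction of the voxels truly in class c
        # (according to trueLabels) that are also assigned to class c in the
        # final labels.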
r = np.zeros((self.nbClasses, self.nbConditions))
for j in xrange(self.nbConditions):
for ic in xrange(self.nbClasses):
idx = np.where(self.finalLabels[j,:] == ic)
r[ic,j] = (self.trueLabels[j,idx] == ic).sum(dtype=float) / \
(self.trueLabels[j,:] == ic).sum(dtype=float)
return r
def getOutputs(self):
outputs = GibbsSamplerVariable.getOutputs(self)
cn = self.dataInput.cNames
axes_names = ['voxel']
roi_lab_vol = np.zeros(self.nbVox, dtype=np.int32) + \
self.dataInput.roiId
outputs['roi_mapping'] = xndarray(roi_lab_vol, axes_names=axes_names,
value_label='ROI')
if self.rescale_results:
shrf = self.samplerEngine.getVariable('hrf')
xh = shrf.calcXh(shrf.finalValue[1:-1])
nrl_rescaled = np.zeros_like(self.finalValue)
for c in xrange(xh.shape[1]):
nrl_rescaled[c,:] = self.finalValue[c,:] * \
(xh[:,c]**2).sum()**.5
outputs['nrl_rescaled'] = xndarray(nrl_rescaled,
axes_names=self.axes_names,
axes_domains=self.axes_domains,
value_label=self.value_label)
ad = {'condition':cn,
'time' : np.arange(self.dataInput.ny)*self.dataInput.tr
}
outputs['design_matrix'] = xndarray(xh,
axes_names=['time','condition'],
axes_domains=ad)
if pyhrf.__usemode__ == pyhrf.DEVEL:
if hasattr(self, 'finalValue_sign_corr'):
outputs['nrl_sign_corr'] = xndarray(self.finalValue_sign_corr,
axes_names=self.axes_names,
axes_domains=self.axes_domains,
value_label=self.value_label)
axes_names = ['class','condition', 'voxel']
axes_domains = {'condition' : cn, 'class' : self.CLASS_NAMES}
t = self.activ_thresh
from scipy.stats.mstats import mquantiles
region_is_active = mquantiles(self.finalValue.max(0), prob=[.9]) > \
self.activ_thresh
region_is_active = region_is_active.astype(np.int16)
pyhrf.verbose(5, 'mquantiles(self.finalValue.max(0), prob=[.9]):')
pyhrf.verbose.printNdarray(5, mquantiles(self.finalValue.max(0),
prob=[.9]))
pyhrf.verbose(5, 'self.finalValue.mean(1).max(): %f' \
%self.finalValue.mean(1).max())
pyhrf.verbose(5, '(self.finalValue.max(0) > t).sum(): %d' \
%(self.finalValue.max(0) > t).sum())
region_is_active = np.tile(region_is_active, self.nbVox)
outputs['active_regions_from_nrls'] = xndarray(region_is_active,
axes_names=['voxel'])
if hasattr(self, 'cumulLabels'):
outputs['pm_cumulLabels'] = xndarray(self.cumulLabels,
axes_names=axes_names,
axes_domains=axes_domains)
outputs['labels_pm'] = xndarray(self.meanLabels,
axes_names=axes_names,
axes_domains=axes_domains,
value_label="pm Labels")
#if self.trueLabels is not None:
# outputs['pmLabels'].applyMask(self.trueLabelsMask)
axes_names = ['condition', 'voxel']
axes_domains = {'condition' : cn}
l = self.finalLabels.astype(np.int32)
outputs['labels_pm_thresh'] = xndarray(l, axes_names=axes_names,
axes_domains=axes_domains,
value_label="pm Labels Thres")
# if hasattr(self, 'meanBeta'):
# #print 'output beta mapped !!!!'
# axes_names = ['condition', 'voxel']
# nbv, nbc = self.nbVox, self.nbConditions
# repeatedBeta = repeat(self.meanBeta, nbv).reshape(nbc, nbv)
# outputs['pm_BetaMapped'] = xndarray(repeatedBeta,
# axes_names=axes_names,
# axes_domains=axes_domains,
# value_label="pm Beta")
if 0:
axes_names = ['gamma', 'condition', 'voxel']
axes_domains = {'gamma': self.ppm_value_multi_thresh, 'condition' : cn}
outputs['PPM_g_MCMC_MultiThresh'] = xndarray(self.stats['PPM_g_MCMC_MultiThresh'], axes_names=axes_names,
axes_domains=axes_domains)
for stat_name, stat in self.stats.iteritems():
if stat_name != 'PPM_g_MCMC_MultiThresh':
axes_names = ['condition', 'voxel']
axes_domains = {'condition' : cn}
outputs[stat_name] = xndarray(stat, axes_names=axes_names,
axes_domains=axes_domains)
if hasattr(self, 'meanClassApost'):
mci = self.meanClassApost[self.L_CI,:,:]
vci = self.varClassApost[self.L_CI,:,:]
pci = self.meanLabels[self.L_CI,:,:]
mca = self.meanClassApost[self.L_CA,:,:]
vca = self.varClassApost[self.L_CA,:,:]
pca = self.meanLabels[self.L_CA,:,:]
outputs['mean_CA_apost'] = xndarray(mca,
axes_names=axes_names,
axes_domains=axes_domains)
outputs['var_CA_apost'] = xndarray(vca,
axes_names=axes_names,
axes_domains=axes_domains)
outputs['proba_CA_apost'] = xndarray(pca,
axes_names=axes_names,
axes_domains=axes_domains)
outputs['mean_CI_apost'] = xndarray(mci,
axes_names=axes_names,
axes_domains=axes_domains)
outputs['var_CI_apost'] = xndarray(vci,
axes_names=axes_names,
axes_domains=axes_domains)
outputs['proba_CI_apost'] = xndarray(pci,
axes_names=axes_names,
axes_domains=axes_domains)
if hasattr(self, 'labelsMeanHistory') and \
self.labelsMeanHistory is not None:
axes_names = ['iteration', 'class', 'condition', 'voxel']
axes_domains = {'condition' : cn,
'class': self.CLASS_NAMES,
'iteration': self.obsHistoryIts}
outputs['labels_pm_hist'] = xndarray(self.labelsMeanHistory,
axes_names=axes_names,
axes_domains=axes_domains,
value_label="label")
if hasattr(self, 'labelsSmplHistory') and \
self.labelsSmplHistory is not None:
axes_names = ['iteration', 'condition', 'voxel']
axes_domains = {'condition' : cn,
'iteration':self.smplHistoryIts}
outputs['labels_smpl_hist'] = xndarray(self.labelsSmplHistory,
axes_names=axes_names,
axes_domains=axes_domains,
value_label="label")
if self.trueLabels is not None:
if 0:
mlabels = self.meanLabels[self.L_CA,:,:]
#easy_install --prefix=$USRLOCAL -U scikits.learn
se,sp,auc = compute_roc_labels_scikit(mlabels,
self.trueLabels)
sensData, specData = se, sp
else:
sensData,specData,auc = compute_roc_labels(self.meanLabels,
self.trueLabels,
0.005,
self.L_CA,
self.L_CI,
self.FALSE_POS,
self.FALSE_NEG)
pyhrf.verbose(2, 'Areas under ROC curves are : %s' \
%str(auc))
#auc = np.array([trapz(sensData[j,:], specData[j,:])
# for j in xrange(self.nbConditions)])
#print auc
# axes_names = ['condition']
# outName = 'Area under ROC curve'
# outputs[outName] = xndarray(area, axes_names=axes_names,
# axes_domains={'condition' : cn})
axes_names = ['condition','1-specificity']
outName = 'ROC'
ad = {'1-specificity':specData[0],'condition':cn}
outputs[outName] = xndarray(sensData, axes_names=axes_names,
axes_domains=ad,
value_label='sensitivity')
axes_names = ['condition']
outputs['AUROC'] = xndarray(auc, axes_names=axes_names,
axes_domains={'condition':cn})
cRate = self.getClassifRate()
axes_names = ['class', 'condition']
ad = {'condition':cn, 'class':self.CLASS_NAMES}
outputs['labels_classif_rate'] = xndarray(cRate,
axes_names=axes_names,
axes_domains=ad)
if self.trueLabels is not None:
markedLabels = self.getFinalLabels().copy()
#markedLabels = self.getFinalLabels(0.8722).copy()
self.markWrongLabels(markedLabels)
axes_names = ['condition', 'voxel']
ad = {'condition':cn}
outputs['labels_thresh_marked'] = xndarray(markedLabels,
axes_names=axes_names,
axes_domains=ad)
#axes_names = ['condition', 'ny', 'nh']
#axes_domains = {'condition' : cn,
#'ny': arange(self.ny),
#'nh': arange(self.nh)}
#outputs['varX'] = xndarray(self.dataInput.varX,
#axes_names=axes_names,
#axes_domains=axes_domains,
#value_label="varX")
#outputs['varXCond'] = xndarray(self.dataInput.varSingleCondXtrials,
#axes_names=axes_names,
#axes_domains=axes_domains,
#value_label="varCondXtrials")
#outputs['multXXcond'] = xndarray(self.dataInput.varSingleCondXtrials*self.dataInput.varX,
#axes_names=axes_names,
#axes_domains=axes_domains,
#value_label="multX-CondX")
pyhrf.verbose(3,'computeContrastsFlag: %s' \
%str(self.computeContrastsFlag))
if self.dataInput.simulData is not None:
#trueNrls = self.dataInput.simulData.nrls.data
trueNrls = self.trueValue
if trueNrls.shape == self.finalValue.shape:
axes_names = ['condition', 'voxel']
ad = {'condition':cn}
relErrorNrls = abs(trueNrls - self.finalValue)
outputs['nrl_pm_error'] = xndarray(relErrorNrls,
axes_names=axes_names,
axes_domains=ad)
axes_names = ['condition', 'voxel']
ad = {'condition':cn}
marked_labels = mark_wrong_labels(self.finalLabels,
self.trueLabels)
outputs['pm_Labels_marked'] = xndarray(marked_labels,
axes_names=axes_names,
axes_domains=ad)
n = (trueNrls.astype(np.float32) - \
self.finalValue.astype(np.float32))**2
outputs['nrl_pm_rmse'] = xndarray(n.mean(1),
axes_names=['condition'],
axes_domains=ad)
                # Computing NRL RMSE in activated voxels only, by multiplying estimated NRLs with estimated labels
#nl = n * l
#outputs['nrl_labels_pm_rmse'] = xndarray(nl.mean(1),
#axes_names=['condition'],
#axes_domains=ad)
if self.computeContrastsFlag:
pyhrf.verbose(3,'self.outputConVars:')
pyhrf.verbose.printNdarray(3, self.outputConVars)
cons = np.array(self.contrasts.values())
con_names = self.contrasts.keys()
con_doms = axes_domains={'contrast':con_names}
outputs['nrl_contrasts'] = xndarray(cons,
axes_names=['contrast','voxel'],
axes_domains=con_doms,
value_label='contrast')
con_vars = np.array([self.contrastsVar[c] for c in con_names])
outputs['nrl_contrasts_var'] = xndarray(con_vars,
axes_names=['contrast','voxel'],
axes_domains=con_doms,
value_label='contrast_var')
outputs['nrl_ncontrasts'] = xndarray(cons/con_vars**.5,
axes_names=['contrast','voxel'],
axes_domains=con_doms,
value_label='contrast')
# axes_names = ['voxel']
# for con_name, con_val in self.contrasts.iteritems():
# outputName = 'nrl_con_' + con_name
# print 'contrastes:', outputName
# outputs[outputName] = xndarray(con_val, axes_names=axes_names,
# value_label="contrast")
# if self.outputConVars:
# con_var = self.contrastsVar[con_name]
# outputName = 'nrl_con_var_' + con_name
# outputs[outputName] = xndarray(con_var, axes_names=axes_names,
# value_label="contrastVar")
if self.computeContrastsFlag and self.wip_variance_computation:
axes_names = ['class','condition','voxel']
axes_domains = {'condition' : cn,'class': self.CLASS_NAMES}
outputs['nrl_mean_mean_apost'] = xndarray(self.mean_mean_apost,
axes_names=axes_names,
axes_domains=axes_domains)
outputs['nrl_mean_var_apost'] = xndarray(self.mean_var_apost,
axes_names=axes_names,
axes_domains=axes_domains)
#Work after simulation step
#AII1/ Contraste = CL of conditions - independant samples
#Variances masked for contrast
outputName = 'pm_var_con_2cond_indep_masked'#+cond+'-'+cond2
var = self.varCon_2cond_indep_masked
outputs[outputName] = xndarray(var, axes_names=['cases', 'voxel'])
outputName = 'pm_var_con_2cond_indep_apost'#+cond+'-'+cond2
var = self.varCon_2cond_indep_apost
outputs[outputName] = xndarray(var, axes_names=['cases', 'voxel'])
#B II/Contraste = CL of conditions - correlated samples
outputName = 'pm_var_con_2cond_corr_masked'#+cond+'-'+cond2
var = self.varCon_2cond_corr_masked
outputs[outputName] = xndarray(var, axes_names=['cases', 'voxel'])
outputName = 'pm_var_con_2cond_corr_non_masked'#+cond+'-'+cond2
var = self.varCon_2cond_corr_non_masked
outputs[outputName] = xndarray(var, axes_names=['voxel'])
#if 0:
#outputName = 'pm_nrl_con_2cond_corr_non_masked'#+cond+'-'+cond2
#var = self.varCon_2cond_corr_non_masked
#outputs[outputName] = xndarray(var, axes_names=['voxel'])
outputName = 'pm_var_con_2cond_corr_apost'#+cond+'-'+cond2
var = self.varCon_2cond_corr_apost
outputs[outputName] = xndarray(var, axes_names=['cases', 'voxel'])
if self.wip_variance_computation \
and ('calculaudio' in self.dataInput.cNames):
#for cond in self.contrasts.iterkeys():
#for cond2 in self.contrasts[cond].iterkeys():
#if self.outputCons:
##print 'outputCons ...'
#outputName = 'nrl_con_'+cond+'-'+cond2
#con = self.contrasts[cond][cond2]
#outputs[outputName] = xndarray(con,
#axes_names=axes_names,
#value_label="contrast")
#outputName = 'nrl_ncon_'+cond+'-'+cond2
#con = self.contrasts[cond][cond2]
#conVar = self.contrastsVar[cond][cond2]
#outputs[outputName] = xndarray(con/conVar**.5,
#axes_names=axes_names,
#value_label="contrast")
#if self.outputConVars:
##print 'outputConVars ...'
#outName = 'nrl_convar_'+cond+'_'+cond2
#conVar = self.contrastsVar[cond][cond2]
#outputs[outName] = xndarray(conVar,
#axes_names=axes_names,
#value_label="contrastVar")
#Save variances of contrasts
#print "Contrasts"
outputName = 'pm_nrl_contrast_Lc-Rc'#+cond+'-'+cond2
con = self.contrast_Lc_Rc
outputs[outputName] = xndarray(con,
axes_names=['voxel'],
value_label="contrast")
outputName = 'pm_nrl_contrast_Lc-Rc_variance'#+cond+'-'+cond2
var = self.contrast_var_Lc_Rc
outputs[outputName] = xndarray(var,
axes_names=['voxel'],
value_label="contrastVar")
outputName = 'pm_nrl_contrast_V-A'#+cond+'-'+cond2
con = self.contrast_V_A
outputs[outputName] = xndarray(con,
axes_names=['voxel'],
value_label="contrast")
outputName = 'pm_nrl_contrast_V-A_variance'#+cond+'-'+cond2
var = self.contrast_var_V_A
outputs[outputName] = xndarray(var,
axes_names=['voxel'],
value_label="contrastVar")
outputName = 'pm_nrl_contrast_C-S'#+cond+'-'+cond2
con = self.contrast_C_S
outputs[outputName] = xndarray(con,
axes_names=['voxel'],
value_label="contrast")
outputName = 'pm_nrl_contrast_C-S_variance'#+cond+'-'+cond2
var = self.contrast_var_C_S
outputs[outputName] = xndarray(var,
axes_names=['voxel'],
value_label="contrastVar")
outputName = 'pm_nrl_contrast_C-S_A'#+cond+'-'+cond2
con = self.contrast_C_S_A
outputs[outputName] = xndarray(con,
axes_names=['voxel'],
value_label="contrast")
outputName = 'pm_nrl_contrast_C-S_A_variance'#+cond+'-'+cond2
var = self.contrast_var_C_S_A
outputs[outputName] = xndarray(var,
axes_names=['voxel'],
value_label="contrastVar")
#Variance masked --> for both classes
axes_names = ['class','condition', 'voxel']
ad = {'condition' : cn, 'class' : self.CLASS_NAMES}
outputs['pm_VarContrast_both_classes'] = xndarray(self.varcontrast_cond_both_classes,
axes_names=axes_names,axes_domains=ad)
#Variance masked --> final values with only values corresponding to the final class for each voxel
axes_names = ['condition', 'voxel']
ad = {'condition' : cn}
outputs['pm_finalVariances'] = xndarray(self.finalVariances,
axes_names=axes_names,axes_domains=ad)
#Variance mean a post --> final values with only values corresponding to the final class for each voxel
axes_names = ['condition', 'voxel']
ad = {'condition' : cn}
outputs['pm_final_mean_var_a_post'] = xndarray(self.final_mean_var_a_post,
axes_names=axes_names,axes_domains=ad)
##Variance masked for inactiv class
#axes_names = ['condition', 'voxel']
#ad = {'condition':cn}
#outputs['pm_VarContrast_class_inactiv'] = xndarray(self.varcontrast_cond_both_classes,
#axes_names=axes_names,axes_domains=ad)
return outputs
class NRLSamplerWithRelVar(NRLSampler):
defaultParameters = copyModule.deepcopy(NRLSampler.defaultParameters)
parametersToShow = copyModule.deepcopy(NRLSampler.parametersToShow)
def __init__(self, parameters=None, xmlHandler=NumpyXMLHandler(),
xmlLabel=None, xmlComment=None):
NRLSampler.__init__(self, parameters, xmlHandler, xmlLabel, xmlComment)
def linkToData(self, dataInput):
NRLSampler.linkToData(self, dataInput)
def checkAndSetInitValue(self, variables):
NRLSampler.checkAndSetInitValue(self, variables)
def createWAxh(self,aXh, w):
np.multiply(w, aXh, self.WaXh)
def computeWA(self, a, w, wa):
for j in np.arange(self.nbConditions):
wa[j,:] = w[j] * a[j,:]
def computeSumWAxh(self, wa, varXh):
self.sumWaXh = np.dot(varXh, wa)
def subtractYtildeWithRelVar(self):
np.subtract(self.dataInput.varMBY, self.sumWaXh, self.varYtilde)
def computeVarYTildeOptWithRelVar(self, varXh, w):
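        # Compute yTilde_j = y_j - sum_m(a_j^m w^m X^m h); the pure-Python path
        # below is disabled (if 0) in favour of the computeYtildeWithRelVar routine.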
if 0:
# yTilde_j = y_j - sum_m(a_j^m w^m X^m h)
pyhrf.verbose(5,'computeVarYTildeOpt...')
pyhrf.verbose(5,'varXh:' +str(varXh.shape))
wa = np.zeros((self.nbConditions, self.nbVox))
self.computeWA(self.currentValue, w, wa)
self.computeSumWAxh(wa, varXh)
pyhrf.verbose(5,'sumWaXh %s' %str(self.sumWaXh.shape))
pyhrf.verbose.printNdarray(6, self.sumWaXh)
#np.subtract(self.dataInput.varMBY, self.sumaXh, self.varYtilde)
self.subtractYtildeWithRelVar()
else:
wa = np.zeros((self.nbConditions, self.nbVox), dtype=float)
computeYtildeWithRelVar(varXh,
self.currentValue,
self.dataInput.varMBY,
self.varYtilde,
self.sumWaXh,
w.astype(np.int32),
wa.astype(np.float64))
pyhrf.verbose(5,'varYtilde %s' %str(self.varYtilde.shape))
pyhrf.verbose.printNdarray(5, self.varYtilde)
def computeComponentsApostWithRelVar(self, variables, j, gTQg, w):
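        # Posterior mean/variance of the NRLs for condition j and each class:
        # when w is 1 the usual Gaussian-mixture posterior components are computed,
        # otherwise they collapse to the moments of the inactive-class prior.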
sIMixtP = variables[self.samplerEngine.I_MIXT_PARAM]
var = sIMixtP.getCurrentVars()
mean = sIMixtP.getCurrentMeans()
rb = variables[self.samplerEngine.I_NOISE_VAR].currentValue
varXh = variables[self.samplerEngine.I_HRF].varXh
nrls = self.currentValue
if(w):
# If wj = 1 The condition is relevant, we compute the posterior components as we did
# without introducing the relevant variable w
gTQgjrb = gTQg[j]/rb
            if pyhrf.verbose.verbosity > 4:
print 'Current components:'
print 'mean CI = %f, var CI = %f' %(mean[self.L_CI,j], var[self.L_CI,j])
print 'mean CA = %f, var CA = %f' %(mean[self.L_CA,j], var[self.L_CA,j])
print 'gTQg =', gTQg[j]
pyhrf.verbose(6, 'gTQg[%d] %s:'%(j,str(gTQg[j].shape)))
pyhrf.verbose.printNdarray(6, gTQg[j])
pyhrf.verbose(6, 'rb %s :'%str(rb.shape))
pyhrf.verbose.printNdarray(6, rb)
pyhrf.verbose(6, 'gTQgjrb %s :'%str(gTQgjrb.shape))
pyhrf.verbose.printNdarray(6, gTQgjrb)
ej = self.varYtilde + nrls[j,:] \
* repmat(varXh[:,j],self.nbVox, 1).transpose()
pyhrf.verbose(6, 'varYtilde %s :'%str((self.varYtilde.shape)))
pyhrf.verbose.printNdarray(6, self.varYtilde)
pyhrf.verbose(6, 'nrls[%d,:] %s :'%(j,nrls[j,:]))
pyhrf.verbose.printNdarray(6, nrls[j,:])
pyhrf.verbose(6, 'varXh[:,%d] %s :'%(j,str(varXh[:,j].shape)))
pyhrf.verbose.printNdarray(6, varXh[:,j])
pyhrf.verbose(6, 'repmat(varXh[:,%d],self.nbVox, 1).transpose()%s:' \
%(j,str((repmat(varXh[:,j],self.nbVox, 1).transpose().shape))))
pyhrf.verbose.printNdarray(6, repmat(varXh[:,j],self.nbVox, 1).transpose())
pyhrf.verbose(6, 'ej %s :'%str((ej.shape)))
pyhrf.verbose.printNdarray(6, ej)
np.divide(np.dot(self.varXhtQ[j,:],ej), rb, self.varXjhtQjeji)
if pyhrf.verbose.verbosity > 5:
pyhrf.verbose(5, 'np.dot(self.varXhtQ[j,:],ej) %s :' \
%str(np.dot(self.varXhtQ[j,:],ej).shape))
pyhrf.verbose.printNdarray(5, np.dot(self.varXhtQ[j,:],ej))
pyhrf.verbose(5, 'self.varXjhtQjeji %s :' \
%str(self.varXjhtQjeji.shape))
pyhrf.verbose.printNdarray(5, self.varXjhtQjeji)
for c in xrange(self.nbClasses):
self.varClassApost[c,j,:] = 1./(1./var[c,j] + gTQgjrb)
np.sqrt(self.varClassApost[c,j,:], self.sigClassApost[c,j,:])
if c > 0: # assume 0 stands for inactivating class
np.multiply(self.varClassApost[c,j,:],
add(mean[c,j]/var[c,j], self.varXjhtQjeji),
self.meanClassApost[c,j,:])
else:
np.multiply(self.varClassApost[c,j,:], self.varXjhtQjeji,
self.meanClassApost[c,j,:])
pyhrf.verbose(5, 'meanClassApost %d cond %d :'%(c,j))
pyhrf.verbose.printNdarray(5, self.meanClassApost[c,j,:])
pyhrf.verbose(5, 'varClassApost %d cond %d :'%(c,j))
pyhrf.verbose.printNdarray(5, self.varClassApost[c,j,:])
else:
for c in xrange(self.nbClasses):
self.varClassApost[c,j,:] = var[0,j]
np.sqrt(self.varClassApost[c,j,:], self.sigClassApost[c,j,:])
self.meanClassApost[c,j,:] = mean[0,j]
pyhrf.verbose(5, 'meanClassApost %d cond %d :'%(c,j))
pyhrf.verbose.printNdarray(5, self.meanClassApost[c,j,:])
pyhrf.verbose(5, 'varClassApost %d cond %d :'%(c,j))
pyhrf.verbose.printNdarray(5, self.varClassApost[c,j,:])
    def deltaWCorr0(self, nbVox, moyqvoxj, t1, t2):
        # Sigmoid-ratio correction term used when the condition is irrelevant (w=0).
        result = np.zeros(self.nbVox, dtype=float)
        for i in xrange(self.nbVox):
            num = np.exp(t1 * ((1. / nbVox) * (moyqvoxj[i] + 1) - t2)) + 1
            denom = np.exp(t1 * ((1. / nbVox) * moyqvoxj[i] - t2)) + 1
            result[i] = num / denom
        return result
    def deltaWCorr1(self, nbVox, moyqvoxj, t1, t2):
        # Sigmoid-ratio correction term used when the condition is relevant (w=1).
        result = np.zeros(self.nbVox, dtype=float)
        for i in xrange(self.nbVox):
            num = np.exp(-t1 * ((1. / nbVox) * (moyqvoxj[i] + 1) - t2)) + 1
            denom = np.exp(-t1 * ((1. / nbVox) * moyqvoxj[i] - t2)) + 1
            result[i] = num / denom
        return result
    def calcFracLambdaTildeWithRelCond(self, l, nbVox, moyqvoxj, t1, t2):
        dWCorr1 = self.deltaWCorr1(nbVox, moyqvoxj, t1, t2)
        return l * dWCorr1
    def calcFracLambdaTildeWithIRRelCond(self, cond, c1, c2, variables, nbVox, moyqvoxj, t1, t2):
        sWeightP = variables[self.samplerEngine.I_WEIGHTING_PROBA]
        varLambda = sWeightP.currentValue
        if self.samplerEngine.getVariable('beta').currentValue[cond] <= 0:
            ratio = varLambda[c1] / varLambda[c2]
        else:
            ratio = 1
        dWCorr0 = self.deltaWCorr0(nbVox, moyqvoxj, t1, t2)
        return ratio * dWCorr0
def computemoyqvox(self, cardClass, nbVox):
'''
Compute mean of labels in ROI (without the label of voxel i)
'''
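        # cardClass[L_CA] counts the active voxels per condition; voxel i's own
        # label is removed from that count before dividing by nbVox.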
moyqvox = np.zeros((self.nbConditions, self.nbVox), dtype=float)
for i in xrange(self.nbVox):
            moyqvox[:,i] = np.divide( cardClass[self.L_CA,:] - self.labels[:, i].transpose(), nbVox)
return moyqvox
def samplingWarmUp(self, variables):
NRLSampler.samplingWarmUp(self, variables)
self.sumWaXh = np.zeros((self.ny, self.nbVox), dtype=float)
def sampleLabelsWithRelVar(self, cond, variables):
# Parameters of Sigmoid function
t1 = 50
t2 = 0.25
        moyqvox = self.computemoyqvox(self.cardClass, self.nbVox)
        moyqvoxj = moyqvox[cond,:] # Vector of length nbVox
w = self.samplerEngine.getVariable('W').currentValue[cond]
if w:
fracLambdaTilde = self.calcFracLambdaTilde(cond, self.L_CI, self.L_CA,
variables)
            fracLambdaTildeWithRelVar = self.calcFracLambdaTildeWithRelCond(fracLambdaTilde, self.nbVox, moyqvoxj, t1, t2)
else :
            fracLambdaTildeWithRelVar = self.calcFracLambdaTildeWithIRRelCond(cond, self.L_CI, self.L_CA,
                                                                              variables, self.nbVox, moyqvoxj, t1, t2)
beta = self.samplerEngine.getVariable('beta').currentValue[cond]
if self.samplerEngine.getVariable('beta').currentValue[cond] > 0:
#corrEnergiesC = np.zeros_like(self.corrEnergies)
if 1:
deltaCol = 0.
#TODO generalize ...
calcCorrEnergies(cond, self.labels, self.corrEnergies,
self.dataInput.neighboursIndexes,
deltaCol, self.nbClasses, self.L_CI, self.L_CA)
fracLambdaTilde *= np.exp(beta * self.corrEnergies[cond,:])
#print 'self.corrEnergies :'
#print self.corrEnergies
else :
for i in xrange(self.nbVox):
deltaE = self.calcDeltaEnergy(i, cond)
self.corrEnergies[cond,i] = deltaE
fracLambdaTilde[i] *= np.exp(beta * deltaE)
#assert np.allclose(corrEnergiesC[cond,:], self.corrEnergies[cond,:])
varLambdaApost = 1./(1.+fracLambdaTildeWithRelVar)
self.labels[cond,:] = np.array(self.labelsSamples[cond,:]<=varLambdaApost,
dtype=int )
        if pyhrf.verbose.verbosity > 6:
for i in xrange(self.nbVox):
print 'it%04d-cond%02d-Vox%03d ...' %(self.iteration,cond,i)
print 'mApostCA =', self.meanClassApost[self.L_CA,cond,i],
print 'mApostCI =', self.meanClassApost[self.L_CI,cond,i]
print 'sApostCA =', self.sigClassApost[self.L_CA,cond,i],
print 'sApostCI =', self.sigClassApost[self.L_CI,cond,i]
print 'rl_I_A =', fracLambdaTilde[i]
print 'lambda Apost CA =', varLambdaApost[i]
print 'random =', self.labelsSamples[cond,i]
print '-> labels = ', self.labels[cond,i]
def sampleNrlsParallelWithRelVar(self, varXh, rb, h, varLambda, varCI, varCA,
meanCA, gTQg, variables, w):
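        # Condition-wise sampling without spatial regularisation: update the
        # posterior components, optionally sample the labels, then draw the NRLs
        # from the class-conditional Gaussians selected by those labels.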
pyhrf.verbose(3, 'Sampling Nrls (parallel, no spatial prior) ...')
for j in xrange(self.nbConditions):
self.computeComponentsApostWithRelVar(variables, j, gTQg, w[j])
if self.sampleLabelsFlag:
pyhrf.verbose(3, 'Sampling labels - cond %d ...'%j)
self.sampleLabelsWithRelVar(j, variables)
self.countLabels(self.labels, self.voxIdx, self.cardClass)
pyhrf.verbose(3,'Sampling labels done!')
pyhrf.verbose(6, 'All labels cond %d:'%j)
pyhrf.verbose.printNdarray(6, self.labels[j,:])
if self.trueLabels is not None:
pyhrf.verbose(6, 'All true labels cond %d:'%j)
pyhrf.verbose.printNdarray(6, self.trueLabels[j,:])
for c in xrange(self.nbClasses):
putmask(self.sigApost[j,:], self.labels[j,:]==c,
self.sigClassApost[c,j,:])
putmask(self.meanApost[j,:],self.labels[j,:]==c,
self.meanClassApost[c,j,:])
oldVal = self.currentValue[j,:]
add(np.multiply(self.nrlsSamples[j,:], self.sigApost[j,:]),
self.meanApost[j,:], self.currentValue[j,:])
self.computeVarYTildeOptWithRelVar(varXh, w)
#self.computeVarYTilde(varXh)
def sampleNrlsSerialWithRelVar(self, rb, h,
gTQg, variables, w, t1, t2):
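        # Voxel-by-voxel (serial) sampling with the spatial prior on labels; the
        # actual sweep is delegated to sampleSmmNrl2WithRelVar.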
pyhrf.verbose(3, 'Sampling Nrls (serial, spatial prior) ...')
sIMixtP = variables[self.samplerEngine.I_MIXT_PARAM]
var = sIMixtP.getCurrentVars()
mean = sIMixtP.getCurrentMeans()
varXh = np.array([variables[self.samplerEngine.I_HRF].varXh])
nrls = self.currentValue
neighbours = self.dataInput.neighboursIndexes
beta = self.samplerEngine.getVariable('beta').currentValue
voxOrder = np.random.permutation(self.nbVox)
if 0:
for j in xrange(self.nbConditions):
betaj = beta[j]
sampleSmmNrlWithRelVar(voxOrder, rb, neighbours, self.varYtilde,
self.labels[j,:], varXh[:,:,j],
self.currentValue[j,:],
self.nrlsSamples[j,:], self.labelsSamples[j,:],
np.array([self.varXhtQ[j,:]]), gTQg[j], betaj,
mean[:,j], var[:,j], self.nbClasses,
self.sampleLabelsFlag+0, self.iteration, j, w[j], self.cardClass[:,j])
#sys.exit(1)
self.countLabels(self.labels, self.voxIdx, self.cardClass)
else:
cardClassCA = np.zeros(self.nbConditions, dtype=int)
for i in range(self.nbConditions):
cardClassCA[i] = self.cardClass[self.L_CA,i]
sampleSmmNrl2WithRelVar(voxOrder.astype(np.int32), rb.astype(np.float64),
neighbours.astype(np.int32),
self.varYtilde,
self.labels, varXh.astype(np.float64),
self.currentValue,
self.nrlsSamples.astype(np.float64),
self.labelsSamples.astype(np.float64),
np.array([self.varXhtQ]).astype(np.float64),
gTQg.astype(np.float64),
beta.astype(np.float64), mean.astype(np.float64),
var.astype(np.float64), self.meanClassApost,
self.varClassApost, w.astype(np.int32), t1, t2,
cardClassCA.astype(np.int32), #self.cardClass,
self.nbClasses, self.sampleLabelsFlag+0, self.iteration,
self.nbConditions)
if (self.varClassApost<=0).any():
raise Exception('Negative posterior variances!')
self.countLabels(self.labels, self.voxIdx, self.cardClass)
def sampleNextInternal(self, variables):
#TODO : comment
#print 'iteration :', self.iteration
sIMixtP = variables[self.samplerEngine.I_MIXT_PARAM]
        varCI = sIMixtP.currentValue[sIMixtP.I_VAR_CI] # Variance of inactive class for all conditions
        varCA = sIMixtP.currentValue[sIMixtP.I_VAR_CA] # Variance of activated class for all conditions
        meanCA = sIMixtP.currentValue[sIMixtP.I_MEAN_CA] # Mean of activated class for all conditions
rb = variables[self.samplerEngine.I_NOISE_VAR].currentValue
sHrf = variables[self.samplerEngine.I_HRF]
varXh = sHrf.varXh
h = sHrf.currentValue
w = variables[self.samplerEngine.I_W].currentValue
t1 = variables[self.samplerEngine.I_W].t1
t2 = variables[self.samplerEngine.I_W].t2
self.nh = np.size(h)
varLambda = variables[self.samplerEngine.I_WEIGHTING_PROBA].currentValue
pyhrf.verbose(5,'varXh %s :' %str(varXh.shape))
pyhrf.verbose.printNdarray(5, varXh)
self.computeVarYTildeOptWithRelVar(varXh, w)
self.computeVarXhtQ(h, self.dataInput.matXQ)
pyhrf.verbose(6,'varXhtQ %s :' %str(self.varXhtQ.shape))
pyhrf.verbose.printNdarray(5, self.varXhtQ)
self.labelsSamples = np.random.rand(self.nbConditions, self.nbVox)
self.nrlsSamples = np.random.randn(self.nbConditions, self.nbVox)
gTQg = np.diag(np.dot(self.varXhtQ,varXh))
if self.samplerEngine.getVariable('beta').currentValue[0] < 0:
self.sampleNrlsParallelWithRelVar(varXh, rb, h, varLambda, varCI,
varCA, meanCA, gTQg, variables, w)
else:
self.sampleNrlsSerialWithRelVar(rb, h, gTQg, variables, w, t1, t2)
self.computeVarYTildeOptWithRelVar(varXh, w)
if (self.currentValue >= 1000).any() and pyhrf.__usemode__ == pyhrf.DEVEL:
pyhrf.verbose(2, "Weird NRL values detected ! %d/%d" \
%((self.currentValue >= 1000).sum(),
self.nbVox*self.nbConditions) )
#pyhrf.verbose.set_verbosity(6)
if pyhrf.verbose.verbosity >= 4:
self.reportDetection()
self.computeAA(self.currentValue, self.aa)
wa = np.zeros((self.nbConditions, self.nbVox))
self.computeWA(self.currentValue, w, wa)
self.computeSumWAxh(wa, varXh)
self.printState(4)
print 'iteration ',self.iteration
self.iteration += 1 #TODO : factorize !!
class BiGaussMixtureParamsSampler(xmlio.XMLParamDrivenClass,
GibbsSamplerVariable):
"""
#TODO : comment
"""
I_MEAN_CA = 0
I_VAR_CA = 1
I_VAR_CI = 2
NB_PARAMS = 3
PARAMS_NAMES = ['Mean_Activ', 'Var_Activ', 'Var_Inactiv']
P_VAL_INI = 'initialValue'
P_SAMPLE_FLAG = 'sampleFlag'
P_USE_TRUE_VALUE = 'useTrueValue'
#P_ACT_MEAN_TRUE_VALUE = 'ActMeanTrueValue'
#P_ACT_VAR_TRUE_VALUE = 'ActVarTrueValue'
#P_INACT_VAR_TRUE_VALUE = 'InactVarTrueValue'
P_MEAN_CA_PR_MEAN = 'meanCAPrMean'
P_MEAN_CA_PR_VAR = 'meanCAPrVar'
P_VAR_CI_PR_ALPHA = 'varCIPrAlpha'
P_VAR_CI_PR_BETA = 'varCIPrBeta'
P_VAR_CA_PR_ALPHA = 'varCAPrAlpha'
P_VAR_CA_PR_BETA = 'varCAPrBeta'
P_HYPER_PRIOR = 'hyperPriorType'
P_ACTIV_THRESH = 'mean_activation_threshold'
#"peaked" priors
defaultParameters = {
P_VAL_INI : None,
P_SAMPLE_FLAG : True,
P_USE_TRUE_VALUE : False,
#P_HYPER_PRIOR : 'Jeffrey',
P_HYPER_PRIOR : 'proper',
P_MEAN_CA_PR_MEAN : 5.,
P_MEAN_CA_PR_VAR : 20.0,
P_VAR_CI_PR_ALPHA : 2.04,
P_VAR_CI_PR_BETA : .5,#2.08,
P_VAR_CA_PR_ALPHA : 2.01,
P_VAR_CA_PR_BETA : .5,
P_ACTIV_THRESH : 4.,
#P_ACT_MEAN_TRUE_VALUE : { 'audio': 0.0, 'video': 0.0 },
#P_ACT_VAR_TRUE_VALUE : { 'audio': 1.0, 'video': 1.0 },
#P_INACT_VAR_TRUE_VALUE : { 'audio': 1.0, 'video': 1.0 },
}
##"flat" priors
#defaultParameters = {
#P_VAL_INI : None,
#P_SAMPLE_FLAG : True,
#P_USE_TRUE_VALUE : False,
##P_HYPER_PRIOR : 'Jeffrey',
#P_HYPER_PRIOR : 'proper',
#P_SAMPLE_FLAG : 1,
#P_MEAN_CA_PR_MEAN : 10.,
#P_MEAN_CA_PR_VAR : 100.0,
#P_VAR_CI_PR_ALPHA : 2.04,
#P_VAR_CI_PR_BETA : 2.08,
#P_VAR_CA_PR_ALPHA : 2.001,
#P_VAR_CA_PR_BETA : 1.01,
#}
# a=2.5, b=0.5 => m=0.5/(2.5-1)=1/3 & v=0.5**2/((2.5-1)**2*(2.5-2))=0.2
    # m=b/(a-1) , v=b**2/((a-1)**2*(a-2))
# a=m**2/v +2 , b=m**3/v + m
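    # Worked example (hypothetical target moments): asking for a prior mean m=1.
    # and variance v=0.5 gives a = 1.**2/0.5 + 2 = 4. and b = 1.**3/0.5 + 1. = 3.,
    # i.e. an InvGamma(4., 3.) prior whose mean 3./(4.-1.) = 1. and variance
    # 3.**2/((4.-1.)**2*(4.-2.)) = 0.5 recover the requested values.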
if pyhrf.__usemode__ == pyhrf.ENDUSER:
parametersToShow = []
L_CA = NRLSampler.L_CA
L_CI = NRLSampler.L_CI
parametersToShow = [ P_VAL_INI, P_SAMPLE_FLAG, P_ACTIV_THRESH,
P_USE_TRUE_VALUE,
#P_ACT_MEAN_TRUE_VALUE, P_ACT_VAR_TRUE_VALUE, P_INACT_VAR_TRUE_VALUE,
P_HYPER_PRIOR,
P_MEAN_CA_PR_MEAN, P_MEAN_CA_PR_VAR, P_VAR_CI_PR_ALPHA,
P_VAR_CI_PR_BETA, P_VAR_CA_PR_ALPHA, P_VAR_CA_PR_BETA]
parametersComments = {
P_HYPER_PRIOR : "Either 'proper' or 'Jeffrey'",
P_ACTIV_THRESH : "Threshold for the max activ mean above which the "\
"region is considered activating",
#P_ACT_MEAN_TRUE_VALUE : \
#"Define the simulated values of activated class means."\
#"It is taken into account when mixture parameters are not sampled.",
#P_ACT_VAR_TRUE_VALUE : \
#"Define the simulated values of activated class variances."\
#"It is taken into account when mixture parameters are not sampled.",
#P_INACT_VAR_TRUE_VALUE : \
#"Define the simulated values of inactivated class variances."\
#"It is taken into account when mixture parameters are not sampled.",
}
def __init__(self, parameters=None, xmlHandler=NumpyXMLHandler(),
xmlLabel=None, xmlComment=None):
"""
#TODO : comment
"""
xmlio.XMLParamDrivenClass.__init__(self, parameters, xmlHandler,
xmlLabel, xmlComment)
sampleFlag = self.parameters[self.P_SAMPLE_FLAG]
valIni = self.parameters[self.P_VAL_INI]
useTrueVal = self.parameters[self.P_USE_TRUE_VALUE]
# get values for priors :
self.varCIPrAlpha = self.parameters[self.P_VAR_CI_PR_ALPHA]
self.varCIPrBeta = self.parameters[self.P_VAR_CI_PR_BETA]
self.varCAPrAlpha = self.parameters[self.P_VAR_CA_PR_ALPHA]
self.varCAPrBeta = self.parameters[self.P_VAR_CA_PR_BETA]
self.meanCAPrMean = self.parameters[self.P_MEAN_CA_PR_MEAN]
self.meanCAPrVar = self.parameters[self.P_MEAN_CA_PR_VAR]
#self.ActMeanTrueValue = self.parameters[self.P_ACT_MEAN_TRUE_VALUE]
#self.ActVarTrueValue = self.parameters[self.P_ACT_VAR_TRUE_VALUE]
#self.InactVarTrueValue = self.parameters[self.P_INACT_VAR_TRUE_VALUE]
an = ['component','condition']
ad = {'component' : self.PARAMS_NAMES}
GibbsSamplerVariable.__init__(self, 'mixt_params', valIni=valIni,
useTrueValue=useTrueVal,
sampleFlag=sampleFlag, axes_names=an,
axes_domains=ad)
php = self.parameters[self.P_HYPER_PRIOR]
self.hyperPriorFlag = False if php=='Jeffrey' else True
self.activ_thresh = self.parameters[self.P_ACTIV_THRESH]
def linkToData(self, dataInput):
self.dataInput = dataInput
self.nbConditions = self.dataInput.nbConditions
self.nbVox = self.dataInput.nbVoxels
self.ny = self.dataInput.ny
self.nbColX = self.dataInput.nbColX
self.nrlCI = range(self.nbConditions)
self.nrlCA = range(self.nbConditions)
if self.dataInput.simulData is not None and \
isinstance(self.dataInput.simulData, list):
if isinstance(self.dataInput.simulData[0], dict) and \
self.dataInput.simulData[0].has_key('condition_defs'):
#take only 1st session -> same is assumed for others
sd = self.dataInput.simulData
cdefs = sd[0]['condition_defs']
self.trueValue = np.zeros((self.NB_PARAMS, self.nbConditions),
dtype=float)
if 0:
#Theorethical true values:
mean_act = np.array([c.m_act for c in cdefs])
var_act = np.array([c.v_act for c in cdefs])
var_inact = np.array([c.v_inact for c in cdefs])
else:
#Empirical true values:
nbc = self.nbConditions
m_act = [np.where(sd[0]['labels'][j,:] == self.L_CA) \
for j in xrange(nbc) ]
m_inact = [np.where(sd[0]['labels'][j,:] == self.L_CI)\
for j in xrange(nbc) ]
all_nrls = np.array([ssd['nrls'] for ssd in sd])
mean_act = np.array([all_nrls[:,j,m_act[j][0]].mean() \
for j in xrange(nbc)])
var_act = np.array([all_nrls[:,j,m_act[j][0]].var() \
for j in xrange(nbc)])
var_inact = np.array([all_nrls[:,j,m_inact[j][0]].var() \
for j in xrange(nbc)])
#raise Exception()
self.trueValue[self.I_MEAN_CA] = mean_act
self.trueValue[self.I_VAR_CA] = var_act
self.trueValue[self.I_VAR_CI] = var_inact
if self.dataInput.simulData is not None and \
isinstance(self.dataInput.simulData, dict):
self.trueValue = np.zeros((self.NB_PARAMS, self.nbConditions),
dtype=float)
simulation = self.dataInput.simulData
if simulation.has_key('condition_defs'):
cdefs = simulation['condition_defs']
self.trueValue[self.I_MEAN_CA] = np.array([c.m_act for c in cdefs])
self.trueValue[self.I_VAR_CA] = np.array([c.v_act for c in cdefs])
self.trueValue[self.I_VAR_CI] = np.array([c.v_inact for c in cdefs])
#print 'meanCA linkToData : ', self.trueValue[self.I_MEAN_CA]
#print 'varCA linkToData : ', self.trueValue[self.I_VAR_CA]
#print 'varCI linkToData : ', self.trueValue[self.I_VAR_CI]
def checkAndSetInitValue(self, variables):
if self.currentValue is None:
if self.useTrueValue:
if self.trueValue is not None:
#TODO fix condition matching
self.currentValue = self.trueValue.copy()[:,:self.nbConditions]
else:
raise Exception('Needed a true value but none defined')
elif 0 and self.useTrueValue:
self.trueValue = np.zeros((self.NB_PARAMS, self.nbConditions ), dtype=float)
self.currentValue = np.zeros((self.NB_PARAMS, self.nbConditions), dtype=float)
self.trueValue[self.I_MEAN_CA] = self.ActMeanTrueValue.values()
self.trueValue[self.I_VAR_CA] = self.ActVarTrueValue.values()
self.trueValue[self.I_VAR_CI] = self.InactVarTrueValue.values()
self.currentValue = self.trueValue.copy()[:,:self.nbConditions]
else:
nc = self.nbConditions
self.currentValue = np.zeros((self.NB_PARAMS, self.nbConditions), dtype=float)
self.currentValue[self.I_MEAN_CA] = np.zeros(nc) + 30. #np.array([2.5,-3.4,0.,0.])
self.currentValue[self.I_VAR_CA] = np.zeros(nc) + 1.0
self.currentValue[self.I_VAR_CI] = np.zeros(nc) + 1.0
#self.currentValue[self.I_MEAN_CA] = np.zeros(nc) + 2. #np.array([2.5,-3.4,0.,0.])
#self.currentValue[self.I_VAR_CA] = np.zeros(nc) + 0.5
#self.currentValue[self.I_VAR_CI] = np.zeros(nc) + 0.5
#self.currentValue[self.I_MEAN_CA] = self.trueValue[self.I_MEAN_CA]
#self.currentValue[self.I_VAR_CA] = self.trueValue[self.I_VAR_CA]
#self.currentValue[self.I_VAR_CI] = self.trueValue[self.I_VAR_CI]
def getCurrentVars(self):
return np.array([self.currentValue[self.I_VAR_CI],
self.currentValue[self.I_VAR_CA]])
def getCurrentMeans(self):
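        # Class means in (CI, CA) order; the inactive-class mean is fixed to zero.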
return np.array([np.zeros(self.nbConditions),
self.currentValue[self.I_MEAN_CA]])
def computeWithProperPriors(self, j, cardCIj, cardCAj):
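        # Conjugate updates under proper priors: inverse-gamma posteriors for the
        # class variances and a Gaussian posterior for the active-class mean; when
        # a class is (almost) empty, only the hyper-prior is used.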
#print 'sample hyper parameters with proper priors ...'
if cardCIj > 1:
nu0j = .5*np.dot(self.nrlCI[j], self.nrlCI[j])
varCIj = 1.0/np.random.gamma(.5*cardCIj + self.varCIPrAlpha,
1/(nu0j + self.varCIPrBeta))
else :
pyhrf.verbose(6,'using only hyper priors for CI (empty class) ...')
varCIj = 1.0/np.random.gamma(self.varCIPrAlpha, 1/self.varCIPrBeta)
if cardCAj > 1:
#print 'cardCAj', cardCAj
eta1j = np.mean(self.nrlCA[j])
nrlCACentered = self.nrlCA[j] - self.currentValue[self.I_MEAN_CA,j]#eta1j
nu1j = .5 * np.dot(nrlCACentered, nrlCACentered)
#r = np.random.gamma(0.5*(cardCAj-1),2/nu1j)
varCAj = 1.0/np.random.gamma(0.5*cardCAj + self.varCAPrAlpha,
1/(nu1j + self.varCAPrBeta))
else :
pyhrf.verbose(6,'using only hyper priors for CA (empty class) ...')
eta1j = 0.0
varCAj = 1.0/np.random.gamma(self.varCAPrAlpha, 1/self.varCAPrBeta)
invVarLikelihood = (cardCAj+0.)/varCAj
## print 'self.meanCAPrVar :', self.meanCAPrVar
meanCAVarAPost = 1/(invVarLikelihood + 1/self.meanCAPrVar)
## print 'meanCAVarAPost = 1/(invVarLikelihood + 1/self.meanCAPrVar) :'
## print '%f = 1/(%f + 1/%f)' %(meanCAVarAPost,invVarLikelihood,self.meanCAPrVar)
#print 'meanCAVarAPost :', meanCAVarAPost
rPrMV = self.meanCAPrMean/self.meanCAPrVar
meanCAMeanAPost = meanCAVarAPost * (eta1j*invVarLikelihood+rPrMV)
#print 'meanCAMeanAPost :', meanCAMeanAPost
## print 'meanCAMeanAPost = meanCAVarAPost * (eta1j*invVarLikelihood + rPrMV) :'
## print '%f = %f *(%f*%f + %f)' %(meanCAMeanAPost,meanCAVarAPost,eta1j,invVarLikelihood,rPrMV)
## print 'meanCAMeanAPost :', meanCAMeanAPost
meanCAj = np.random.normal(meanCAMeanAPost, meanCAVarAPost**0.5)
#print 'v0 =',varCIj,', v1 =',varCAj,', m1 =',meanCAj
return varCIj,meanCAj,varCAj
def computeWithJeffreyPriors(self, j, cardCIj, cardCAj):
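        # Same updates under improper Jeffrey priors; empty or singleton classes
        # fall back to weakly informative draws.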
#print 'sample hyper parameters with improper Jeffrey\'s priors ...'
if pyhrf.verbose.verbosity >= 3:
print 'cond %d - card CI = %d' %(j,cardCIj)
print 'cond %d - card CA = %d' %(j,cardCAj)
print 'cond %d - cur mean CA = %f' %(j,self.currentValue[self.I_MEAN_CA,j])
if cardCAj > 0:
print 'cond %d - nrl CA: %f(v%f)[%f,%f]' %(j,self.nrlCA[j].mean(),
self.nrlCA[j].var(),
self.nrlCA[j].min(),
self.nrlCA[j].max())
if cardCIj > 0:
print 'cond %d - nrl CI: %f(v%f)[%f,%f]' %(j,self.nrlCI[j].mean(),
self.nrlCI[j].var(),
self.nrlCI[j].min(),
self.nrlCI[j].max())
if cardCIj > 1:
nu0j = np.dot(self.nrlCI[j], self.nrlCI[j])
varCIj = 1.0 / np.random.gamma(0.5 * (cardCIj + 1) - 1, 2. / nu0j)
#varCIj = 1.0 / np.random.gamma(0.5 * (cardCIj - 1), 2. / nu0j)
else :
varCIj = 1.0 / np.random.gamma(0.5, 0.2)
#HACK
#varCIj = .5
if cardCAj > 1:
nrlC1Centered = self.nrlCA[j] - self.currentValue[self.I_MEAN_CA,j]
##print 'nrlC1Centered :', nrlC1Centered
nu1j = np.dot(nrlC1Centered, nrlC1Centered)
#r = np.random.gamma(0.5 * (cardCAj + 1) - 1, 2 / nu1j)
#print 'nu1j / 2. :', nu1j / 2.
#print '0.5 * (cardCAj + 1) - 1 =', 0.5 * (cardCAj + 1) - 1
if pyhrf.verbose.verbosity >= 3:
print 'varCA ~ InvGamma(%f, nu1j/2=%f)' %(0.5*(cardCAj+1)-1,
nu1j/2.)
print ' -> mean =', (nu1j/2.)/(0.5*(cardCAj+1)-1)
varCAj = 1.0 / np.random.gamma(0.5 * (cardCAj + 1) - 1, 2. / nu1j)
#varCAj = 1.0 / np.random.gamma(0.5 * (cardCAj - 1), 2. / nu1j)
pyhrf.verbose(3,'varCAj (j=%d) : %f' %(j,varCAj))
if varCAj <= 0.:
print 'variance for class activ and condition %s '\
'is negative or null: %f' %(self.dataInput.cNames[j],varCAj)
print 'nu1j:', nu1j, '2. / nu1j', 2. / nu1j
print 'cardCAj:', cardCAj, '0.5 * (cardCAj + 1) - 1:', \
0.5 * (cardCAj + 1) - 1
print '-> setting it to almost 0.'
varCAj = 0.0001
#print '(varC1j/cardC1[j])**0.5 :', (varCAj/cardCAj)**0.5
eta1j = np.mean(self.nrlCA[j])
#print 'eta1j :', eta1j
meanCAj = np.random.normal(eta1j, (varCAj / cardCAj)**0.5)
# variance for class activ and condition video is negative or null:
# 0.000000
# nu1j: 2.92816412349e-306 2. / nu1j 6.83021823796e+305
# cardCAj: 501 0.5 * (cardCAj + 1) - 1: 250.0
# -> setting it to almost 0.
else :
#print 'Warning : cardCA <= 1!'
varCAj = 1.0 / np.random.gamma(.5, 2.)
if cardCAj == 0 :
meanCAj = np.random.normal(5.0, varCAj**0.5)
else:
meanCAj = np.random.normal(self.nrlCA[j], varCAj**0.5)
if pyhrf.verbose.verbosity >= 3:
print 'Sampled components - cond', j
print 'var CI =', varCIj
print 'mean CA =', meanCAj, 'var CA =', varCAj
return varCIj, meanCAj, varCAj
def sampleNextInternal(self, variables):
#TODO : comment
## print '- Sampling Mixt params ...'
nrlsSmpl = self.samplerEngine.getVariable('nrl')
cardCA = nrlsSmpl.cardClass[self.L_CA,:]
cardCI = nrlsSmpl.cardClass[self.L_CI,:]
for j in xrange(self.nbConditions):
vICI = nrlsSmpl.voxIdx[nrlsSmpl.L_CI][j]
vICA = nrlsSmpl.voxIdx[nrlsSmpl.L_CA][j]
self.nrlCI[j] = nrlsSmpl.currentValue[j, vICI]
self.nrlCA[j] = nrlsSmpl.currentValue[j, vICA]
for j in xrange(self.nbConditions):
#for j in np.random.permutation(self.nbConditions):
if self.hyperPriorFlag:
varCIj,meanCAj,varCAj = self.computeWithProperPriors(j,
cardCI[j],
cardCA[j])
else:
varCIj,meanCAj,varCAj = self.computeWithJeffreyPriors(j,
cardCI[j],
cardCA[j])
self.currentValue[self.I_VAR_CI, j] = varCIj
self.currentValue[self.I_MEAN_CA, j] = meanCAj #absolute(meanCAj)
self.currentValue[self.I_VAR_CA, j] = varCAj
pyhrf.verbose(5, 'varCI,%d=%f' \
%(j,self.currentValue[self.I_VAR_CI,j]))
pyhrf.verbose(5, 'meanCA,%d=%f' \
%(j,self.currentValue[self.I_MEAN_CA,j]))
pyhrf.verbose(5, 'varCA,%d = %f' \
%(j,self.currentValue[self.I_VAR_CA,j]))
def updateObsersables(self):
GibbsSamplerVariable.updateObsersables(self)
sHrf = self.samplerEngine.getVariable('hrf')
sScale = self.samplerEngine.getVariable('scale')
if sHrf.sampleFlag and np.allclose(sHrf.normalise,0.) and \
not sScale.sampleFlag and self.sampleFlag:
pyhrf.verbose(6, 'Normalizing Posterior mean of Mixture Parameters at each iteration ...')
#print '%%%% scaling NRL PME %%% - hnorm = ', sHrf.norm
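# NOTE: this rescaling seems to compensate the scale ambiguity between HRF and
# NRLs when the HRF is sampled without normalisation: posterior means scale
# with the HRF norm and variances with its square, so the running cumulants
# are recomputed from the scaled current values.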
# Undo previous calculation:
self.cumul -= self.currentValue
#self.cumul2 -= self.currentValue**2
self.cumul3 -= (self.currentValue - self.mean)**2
# Use scaled quantities instead:
cur_m_CA = self.currentValue[self.I_MEAN_CA]
cur_v_CA = self.currentValue[self.I_VAR_CA]
cur_v_CI = self.currentValue[self.I_VAR_CI]
self.cumul[self.I_MEAN_CA] += cur_m_CA * sHrf.norm
#self.cumul2[self.I_MEAN_CA] += (cur_m_CA * sHrf.norm)**2
self.cumul[self.I_VAR_CA] += cur_v_CA * sHrf.norm**2
#self.cumul2[self.I_VAR_CA] += (cur_v_CA * sHrf.norm**2)**2
self.cumul[self.I_VAR_CI] += cur_v_CI * sHrf.norm**2
#self.cumul2[self.I_VAR_CI] += (cur_v_CI * sHrf.norm**2)**2
self.mean = self.cumul / self.nbItObservables
self.cumul3[self.I_MEAN_CA] += (cur_m_CA * sHrf.norm - self.mean[self.I_MEAN_CA])**2
self.cumul3[self.I_VAR_CA] += (cur_v_CA * sHrf.norm**2 - self.mean[self.I_VAR_CA])**2
self.cumul3[self.I_VAR_CI] += (cur_v_CI * sHrf.norm**2 - self.mean[self.I_VAR_CI])**2
#self.error = self.cumul2 / self.nbItObservables - \
#self.mean**2
self.error = self.cumul3 / self.nbItObservables
def get_string_value(self, v):
v = v.transpose()
if 0:
print 'get_string_value for mixt_params ...'
print v.shape, self.dataInput.cNames
print '->', v[:,:len(self.dataInput.cNames)].shape
return get_2Dtable_string(v[:len(self.dataInput.cNames),:],
self.dataInput.cNames,
self.PARAMS_NAMES,)
def getOutputs(self):
outputs = {}
if pyhrf.__usemode__ == pyhrf.DEVEL:
outputs = GibbsSamplerVariable.getOutputs(self)
mixtp = np.zeros((2, self.nbConditions, 2))
mixtp[self.L_CA, :, 0] = self.finalValue[self.I_MEAN_CA,:]
mixtp[self.L_CA, :, 1] = self.finalValue[self.I_VAR_CA,:]
mixtp[self.L_CI, :, 0] = 0.
mixtp[self.L_CI, :, 1] = self.finalValue[self.I_VAR_CI,:]
an = ['class','condition','component']
ad = {'class':['inactiv','activ'],'condition':self.dataInput.cNames,
'component':['mean','var']}
outputs['pm_'+self.name] = xndarray(mixtp, axes_names=an,
axes_domains=ad)
mixtp_mapped = np.tile(mixtp, (self.nbVox, 1, 1, 1))
outputs['pm_'+self.name+'_mapped'] = xndarray(mixtp_mapped,
axes_names=['voxel']+an,
axes_domains=ad)
region_is_active = self.finalValue[self.I_MEAN_CA,:].max() > \
self.activ_thresh
region_is_active = region_is_active.astype(np.int16)
region_is_active = np.tile(region_is_active, self.nbVox)
an = ['voxel']
outputs['active_regions_from_mean_activ'] = xndarray(region_is_active,
axes_names=an)
return outputs
def finalizeSampling(self):
GibbsSamplerVariable.finalizeSampling(self)
del self.nrlCA
del self.nrlCI
#class NRL_Multi_Sess_NRLsBar_Sampler(xmlio.XMLParamDrivenClass, GibbsSamplerVariable):
## parameters specifications :
#P_SAMPLE_FLAG = 'sampleFlag'
#P_VAL_INI = 'initialValue'
#P_USE_TRUE_NRLS = 'useTrueNrls'
#P_TrueNrlFilename = 'TrueNrlFilename'
#P_OUTPUT_NRL = 'writeResponsesOutput'
## parameters definitions and default values :
#defaultParameters = {
#P_SAMPLE_FLAG : True,
#P_VAL_INI : None,
#P_USE_TRUE_NRLS : False, #False,
#P_OUTPUT_NRL : True,
#P_TrueNrlFilename : None, #'./nrls.nii',
#}
#if pyhrf.__usemode__ == pyhrf.DEVEL:
#parametersToShow = [P_SAMPLE_FLAG, P_VAL_INI, P_USE_TRUE_NRLS, P_TrueNrlFilename,
#P_OUTPUT_NRL]
#elif pyhrf.__usemode__ == pyhrf.ENDUSER:
#parametersToShow = [P_OUTPUT_NRL]
#parametersComments = {
## P_CONTRASTS : 'Define contrasts as a string with the following format:'\
## '\n condition1-condition2;condition1-condition3\n' \
## 'Must be consistent with condition names specified in session data' \
## 'above',
#P_TrueNrlFilename :'Define the filename of simulated NRLs.\n'\
#'It is taken into account when NRLs is not sampled.',
#}
#def __init__(self, parameters=None, xmlHandler=NumpyXMLHandler(),
#xmlLabel=None, xmlComment=None):
##TODO : comment
#xmlio.XMLParamDrivenClass.__init__(self, parameters, xmlHandler,
#xmlLabel, xmlComment)
#sampleFlag = self.parameters[self.P_SAMPLE_FLAG]
#valIni = self.parameters[self.P_VAL_INI]
#useTrueVal = self.parameters[self.P_USE_TRUE_NRLS]
#self.TrueNrlsFilename = self.parameters[self.P_TrueNrlFilename]
#an = ['condition', 'voxel']
#GibbsSamplerVariable.__init__(self,'nrl', valIni=valIni,
#sampleFlag=sampleFlag,
#useTrueValue=useTrueVal,
#axes_names=an,
#value_label='PM NRL')
#self.outputNrls = self.parameters[self.P_OUTPUT_NRL]
#def computeComponentsApost(self, variables, j, gTQg):
#sIMixtP = variables[self.samplerEngine.I_MIXT_PARAM]
#var = sIMixtP.getCurrentVars()
##var_nrlSess = #TODO
#mean = sIMixtP.getCurrentMeans()
#rb = variables[self.samplerEngine.I_NOISE_VAR].currentValue
#varXh = variables[self.samplerEngine.I_HRF].varXh
#nrls = self.currentValue
#gTQgjrb = gTQg[j]/rb
#if pyhrf.verbose > 4:
#print 'Current components:'
#print 'mean CI = %f, var CI = %f' %(mean[self.L_CI,j], var[self.L_CI,j])
#print 'mean CA = %f, var CA = %f' %(mean[self.L_CA,j], var[self.L_CA,j])
#print 'gTQg =', gTQg[j]
#pyhrf.verbose(6, 'gTQg[%d] %s:'%(j,str(gTQg[j].shape)))
#pyhrf.verbose.printNdarray(6, gTQg[j])
#pyhrf.verbose(6, 'rb %s :'%str(rb.shape))
#pyhrf.verbose.printNdarray(6, rb)
#pyhrf.verbose(6, 'gTQgjrb %s :'%str(gTQgjrb.shape))
#pyhrf.verbose.printNdarray(6, gTQgjrb)
#ej = self.varYtilde + nrls[j,:] \
#* repmat(varXh[:,j],self.nbVox, 1).transpose()
#pyhrf.verbose(6, 'varYtilde %s :'%str((self.varYtilde.shape)))
#pyhrf.verbose.printNdarray(6, self.varYtilde)
#pyhrf.verbose(6, 'nrls[%d,:] %s :'%(j,nrls[j,:]))
#pyhrf.verbose.printNdarray(6, nrls[j,:])
#pyhrf.verbose(6, 'varXh[:,%d] %s :'%(j,str(varXh[:,j].shape)))
#pyhrf.verbose.printNdarray(6, varXh[:,j])
#pyhrf.verbose(6, 'repmat(varXh[:,%d],self.nbVox, 1).transpose()%s:' \
#%(j,str((repmat(varXh[:,j],self.nbVox, 1).transpose().shape))))
#pyhrf.verbose.printNdarray(6, repmat(varXh[:,j],self.nbVox, 1).transpose())
#pyhrf.verbose(6, 'ej %s :'%str((ej.shape)))
#pyhrf.verbose.printNdarray(6, ej)
#np.divide(np.dot(self.varXhtQ[j,:],ej), rb, self.varXjhtQjeji)
#if pyhrf.verbose.verbosity > 5:
#pyhrf.verbose(5, 'np.dot(self.varXhtQ[j,:],ej) %s :' \
#%str(np.dot(self.varXhtQ[j,:],ej).shape))
#pyhrf.verbose.printNdarray(5, np.dot(self.varXhtQ[j,:],ej))
#pyhrf.verbose(5, 'self.varXjhtQjeji %s :' \
#%str(self.varXjhtQjeji.shape))
#pyhrf.verbose.printNdarray(5, self.varXjhtQjeji)
#for c in xrange(self.nbClasses):
##print 'var[%d,%d] :' %(c,j), var[c,j]
##print 'mean[%d,%d] :' %(c,j), mean[c,j]
#self.varClassApost[c,j,:] = 1./(1./var[c,j] + 1/var_nrlSess)
#if 0:
#print 'shape of self.varClassApost[c,j,:] :', \
#self.varClassApost.shape
##print 'varClassApost[%d,%d,:]:' %(c,j), self.varClassApost[c,j,:]
#np.sqrt(self.varClassApost[c,j,:], self.sigClassApost[c,j,:])
#if c > 0: # assume 0 stands for inactivating class
#np.multiply(self.varClassApost[c,j,:],
#add(mean[c,j]/var[c,j], nrls[:,j,c].sum()/var_nrlSess),
#self.meanClassApost[c,j,:])
##nrls[:,j,c].sum() = sum on sessions of aj,m,s
#else:
#np.multiply(self.varClassApost[c,j,:], nrls[:,j,c].sum()/var_nrlSess,
#self.meanClassApost[c,j,:])
#pyhrf.verbose(5, 'meanClassApost %d cond %d :'%(c,j))
#pyhrf.verbose.printNdarray(5, self.meanClassApost[c,j,:])
#pyhrf.verbose(5, 'varClassApost %d cond %d :'%(c,j))
#pyhrf.verbose.printNdarray(5, self.varClassApost[c,j,:])
#pyhrf.verbose(5, 'shape of self.varClassApost[c,j,:] : %s' \
#%str(self.varClassApost.shape))
class NRL_Multi_Sess_Sampler(xmlio.XMLParamDrivenClass, GibbsSamplerVariable):
# parameters specifications :
P_SAMPLE_FLAG = 'sampleFlag'
P_VAL_INI = 'initialValue'
P_USE_TRUE_NRLS = 'useTrueNrls'
P_TrueNrlFilename = 'TrueNrlFilename'
P_OUTPUT_NRL = 'writeResponsesOutput'
# parameters definitions and default values :
defaultParameters = {
P_SAMPLE_FLAG : True,
P_VAL_INI : None,
P_USE_TRUE_NRLS : False, #False,
P_OUTPUT_NRL : True,
P_TrueNrlFilename : None, #'./nrls.nii',
}
if pyhrf.__usemode__ == pyhrf.DEVEL:
parametersToShow = [P_SAMPLE_FLAG, P_VAL_INI, P_USE_TRUE_NRLS,
P_TrueNrlFilename, P_OUTPUT_NRL
]
elif pyhrf.__usemode__ == pyhrf.ENDUSER:
parametersToShow = [P_OUTPUT_NRL]
parametersComments = {
P_TrueNrlFilename :'Define the filename of simulated NRLs.\n'\
'It is taken into account when NRLs is not sampled.',
}
def __init__(self, parameters=None, xmlHandler=NumpyXMLHandler(),
xmlLabel=None, xmlComment=None):
#TODO : comment
xmlio.XMLParamDrivenClass.__init__(self, parameters, xmlHandler,
xmlLabel, xmlComment)
sampleFlag = self.parameters[self.P_SAMPLE_FLAG]
valIni = self.parameters[self.P_VAL_INI]
useTrueVal = self.parameters[self.P_USE_TRUE_NRLS]
self.TrueNrlsFilename = self.parameters[self.P_TrueNrlFilename]
an = ['session', 'condition', 'voxel']
GibbsSamplerVariable.__init__(self,'nrl_by_session', valIni=valIni,
sampleFlag=sampleFlag,
useTrueValue=useTrueVal,
axes_names=an,
value_label='PM NRL')
self.outputNrls = self.parameters[self.P_OUTPUT_NRL]
def linkToData(self, dataInput):
self.dataInput = dataInput
self.nbConditions = self.dataInput.nbConditions
self.nbVox = self.dataInput.nbVoxels
self.ny = self.dataInput.ny
self.nbSessions = self.dataInput.nbSessions
if dataInput.simulData is not None:
if isinstance(dataInput.simulData, dict):
if dataInput.simulData.has_key('nrls'):
nrls = dataInput.simulData['nrls']
if isinstance(nrls, xndarray):
self.trueValue = nrls.reorient(['condition','voxel']).data
else:
self.trueValue = nrls
elif isinstance(dataInput.simulData, list):
v = np.array([sd['nrls_session'].astype(np.float64)\
for sd in dataInput.simulData])
self.trueValue = v
else:
if hasattr(dataInput.simulData[0], 'nrls_session'):
self.trueValue = np.array([dataInput.simulData[s]['nrls_session'].data.astype(np.float64)\
for s in xrange(self.nbSessions)])
else:
self.trueValue = None
def checkAndSetInitValue(self, variables):
pyhrf.verbose(3, 'NRL_Multi_Sess_Sampler.checkAndSetInitNRLs ...')
smplNrlBar = variables[self.samplerEngine.I_NRLS_BAR]
smplNrlBar.checkAndSetInitValue(variables)
smplDrift = variables[self.samplerEngine.I_DRIFT]
smplDrift.checkAndSetInitValue(variables)
self.varYtilde = np.zeros((self.nbSessions, self.ny, self.nbVox), dtype=np.float64)
self.sumaXh = np.zeros((self.nbSessions, self.ny, self.nbVox), dtype=np.float64)
self.varYbar = np.zeros((self.nbSessions, self.ny, self.nbVox), dtype=np.float64)
if self.useTrueValue:
if self.trueValue is None:
raise Exception('Needed a true value for nrls init but '\
'None defined')
else:
self.currentValue = self.trueValue.astype(np.float64)
if self.currentValue is None :
#nrlsIni = np.zeros((self.nbSessions,self.nbConditions, self.nbVox), dtype=np.float64)
## Init Nrls according to classes definitions :
#smplGaussP = variables[self.samplerEngine.I_NRLs_Gauss_P]
## ensure that mixture parameters are correctly set
#smplGaussP.checkAndSetInitValue(variables)
#var_nrls = smplMixtP.getCurrentVars()
#means = smplMixtP.getCurrentMeans()
#for s in xrange(self.nbSessions):
#for m in xrange(self.nbConditions):
#for j in xrange(self.nbVox):
#nrlsIni[s,m,j] = np.random.randn() \
#* var_nrls**0.5 + means[s,m]
#self.currentValue = nrlsIni
##HACK (?)
if 0:
self.currentValue = np.zeros((self.nbSessions, self.nbConditions, self.nbVox),
dtype=np.float64)
nrl_bar = self.samplerEngine.getVariable('nrl').currentValue
var_sess = self.samplerEngine.getVariable('variance_nrls_by_session').currentValue
labels = self.samplerEngine.getVariable('nrl').labels
for m in xrange(self.nbConditions):
Ac_pos = np.where(labels[m])
for s in xrange(self.nbSessions):
Nrls_sess = np.random.randn((self.nbVox))*var_sess**0.5 #+ nrl_bar[s,m]
Nrls_sess[Ac_pos[0]] = np.random.randn((Ac_pos[0].size))*var_sess**0.5 + 30
self.currentValue[s,m] = Nrls_sess.astype(np.float64)
#self.currentValue[s]
self.currentValue = np.zeros((self.nbSessions, self.nbConditions, self.nbVox),
dtype=np.float64)+20
def saveCurrentValue(self, it):
GibbsSamplerVariable.saveCurrentValue(self, it)
def samplingWarmUp(self, variables):
"""
#TODO : comment
"""
# Precalculations and allocations :
smplHRF = self.samplerEngine.getVariable('hrf')
imm=[]
aXh=[]
sumaXh=[]
computeVarYtildeOpt=[]
#self.egsurrb = np.empty(( self.nbConditions, self.nbVox), dtype=float)
#self.varYtilde = np.zeros((self.nbSessions, self.ny, self.nbVox), dtype=np.float64)
#self.varYbar = np.zeros((self.nbSessions, self.ny, self.nbVox), dtype=np.float64)
#self.sumaXh = np.zeros((self.nbSessions, self.ny, self.nbVox), dtype=np.float64)
self.aa = np.zeros((self.nbSessions, self.nbConditions, self.nbConditions, self.nbVox), dtype=float)
self.meanApost = np.zeros((self.nbSessions, self.nbConditions, self.nbVox), dtype=float)
self.sigApost = np.zeros((self.nbSessions,self.nbConditions, self.nbVox), dtype=float)
for s in xrange(self.nbSessions):
self.imm = self.samplerEngine.getVariable('beta').currentValue[0] < 0
imm.append(self.imm)
self.computeVarYTildeSessionOpt(smplHRF.varXh[s], s)
self.aXh = np.empty((self.nbVox, self.ny, self.nbConditions), dtype=float)
aXh.append(self.aXh)
self.computeAA(self.currentValue, self.aa)
imm = np.array(self.imm)
aXh = np.array(self.aXh)
self.iteration = 0
pyhrf.verbose(5,'varYtilde at end of warm up %s' \
%str(self.varYtilde.shape))
def computeAA(self, nrls, destaa):
for s in xrange(self.nbSessions):
for j in xrange(self.nbConditions):
for k in xrange(self.nbConditions):
np.multiply(nrls[s,j,:], nrls[s,k,:],
destaa[s,j,k])
def computeVarYTildeSessionOpt(self, varXh, s):
#print 'shapes:', varXh.shape, self.currentValue[s].shape, self.dataInput.varMBY[s].shape, self.varYtilde[s].shape, self.sumaXh[s].shape
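# NOTE: computeYtilde presumably forms, for session s,
# ytilde = y - sum_m a_{s,m} (X_m h), i.e. the residual after removing the
# stimulus-induced signal; ybar below additionally removes the drift P*l.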
computeYtilde(varXh, self.currentValue[s], self.dataInput.varMBY[s],
self.varYtilde[s], self.sumaXh[s])
pyhrf.verbose(5,'varYtilde %s' %str(self.varYtilde[s].shape))
pyhrf.verbose.printNdarray(5, self.varYtilde[s])
matPl = self.samplerEngine.getVariable('drift').matPl
self.varYbar[s] = self.varYtilde[s] - matPl[s]
def sampleNextAlt(self, variables):
#used in case of trueValue choice !
varXh = variables[self.samplerEngine.I_HRF].varXh
for s in xrange(self.nbSessions):
self.computeVarYTildeSessionOpt(varXh[s], s)
def computeComponentsApost(self, variables, m, varXh, s):
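# NOTE: posterior components for the session-specific NRL a_{s,m,j}, assuming
# a_{s,m,j} ~ N(abar_{m,j}, var_a) and Gaussian noise with variance rb[s,j]:
# sigApost^2 = 1/(1/var_a + g^T g / rb) and
# meanApost  = sigApost^2 * (abar/var_a + g^T e / rb),
# where g = varXh[s][:,m] and e is the data with the m-th regressor added back.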
self.var_a = self.samplerEngine.getVariable('variance_nrls_by_session').currentValue
rb = self.samplerEngine.getVariable('noise_var').currentValue
nrls = self.currentValue
nrl_bar = self.samplerEngine.getVariable('nrl').currentValue
pyhrf.verbose(6, 'rb %s :'%str(rb.shape))
pyhrf.verbose.printNdarray(6, rb)
pyhrf.verbose(6, 'var_a %f :'%self.var_a[0])
gTg = np.diag(np.dot(varXh[s].transpose(),varXh[s]))
ejsm = self.varYbar[s] + nrls[s,m,:] \
* repmat(varXh[s][:,m],self.nbVox, 1).transpose()
#pyhrf.verbose(6, 'varYtilde %s :'%str((self.varYtilde.shape)))
#pyhrf.verbose.printNdarray(6, self.varYtilde)
#pyhrf.verbose(6, 'nrls[%d,:] %s :'%(j,nrls[j,:]))
#pyhrf.verbose.printNdarray(6, nrls[j,:])
#pyhrf.verbose(6, 'varXh[:,%d] %s :'%(j,str(varXh[:,j].shape)))
#pyhrf.verbose.printNdarray(6, varXh[:,j])
#pyhrf.verbose(6, 'repmat(varXh[:,%d],self.nbVox, 1).transpose()%s:' \
#%(j,str((repmat(varXh[:,j],self.nbVox, 1).transpose().shape))))
#pyhrf.verbose.printNdarray(6, repmat(varXh[:,j],self.nbVox, 1).transpose())
self.egsurrb = np.divide(np.dot(ejsm.transpose(), varXh[s][:,m]), rb[s,:])
#print 'varYbar:', self.varYbar[s][150]
#print 'nrls*g:', nrls[s,m,:] * repmat(varXh[s][:,m],self.nbVox, 1).transpose()
#print 'ejsm:', ejsm[:,150]
#print 'g:', varXh[s][:,m]
#print 'ejsm*g:', np.dot(ejsm.transpose(), varXh[s][:,m])[150]
#print 'nrlbar:', nrl_bar[m,150]
#print 'varXh:', varXh[s][150,m]
#print 'egsurrb', self.egsurrb[150]
self.sigApost[s,m,:] = np.sqrt(1./(1./self.var_a + gTg[m]*1./rb[s,:]))
np.multiply(self.sigApost[s,m,:]**2,
np.add(nrl_bar[m,:]/self.var_a, self.egsurrb),
self.meanApost[s,m,:])
pyhrf.verbose(6, "sigApost[s=%d,m=%d,:2]" %(s,m))
pyhrf.verbose.printNdarray(6, self.sigApost[s,m,:2])
pyhrf.verbose(6, "nrl_bar[m=%d,:2]/var_a" %(m))
pyhrf.verbose.printNdarray(6, (nrl_bar[m,:]/self.var_a)[:2])
pyhrf.verbose(6, "ejsm[:2]")
pyhrf.verbose.printNdarray(6, ejsm[:,:2])
pyhrf.verbose(6, "ejsm.(Xh)t[:2]")
pyhrf.verbose.printNdarray(6,np.dot(ejsm.transpose(), varXh[s][:,m])[:2])
pyhrf.verbose(6, "egsurrb[:2]")
pyhrf.verbose.printNdarray(6, self.egsurrb[:2])
pyhrf.verbose(6, "meanApost[s=%d,m=%d,:2]" %(s,m))
pyhrf.verbose.printNdarray(6, self.meanApost[s,m,:2])
def sampleNextInternal(self, variables):
pyhrf.verbose(3, 'NRL_Multi_Sess_Sampler.sampleNextInternal ...')
varXh = self.samplerEngine.getVariable('hrf').varXh
for s in xrange(self.nbSessions):
self.computeVarYTildeSessionOpt(varXh[s], s)
for m in xrange(self.nbConditions):
self.computeComponentsApost(variables, m, varXh, s)
for j in xrange(self.nbVox):
self.currentValue[s][m,j] = np.random.normal(self.meanApost[s,m,j], self.sigApost[s,m,j])
self.computeVarYTildeSessionOpt(varXh[s], s)
#print '""""', self.currentValue
#print 'mean apost:', self.meanApost[s,m,j]
#print 'sig apost:', self.sigApost[s,m,j]
#print 'ééééééééééééééééééééééééééééééééééééééééééééééééééééééééééééééé'
if (self.currentValue >= 1000).any() and pyhrf.__usemode__ == pyhrf.DEVEL:
pyhrf.verbose(2, "Weird NRL values detected ! %d/%d" \
%((self.currentValue >= 1000).sum(),
self.nbVox*self.nbConditions) )
self.computeAA(self.currentValue, self.aa)
self.iteration += 1 #TODO : factorize !!
def cleanMemory(self):
# clean memory of temporary variables :
del self.sigApost
del self.meanApost
del self.aa
del self.aXh
del self.varYtilde
del self.varXhtQ
del self.sumaXh
if hasattr(self,'nrlsSamples'):
del self.nrlsSamples
del self.voxIdx
#def setFinalValue(self):
#for s in xrange(self.nbSessions):
#self.finalValue[s,:,:] = self.getMean(2)
##Mean over iterations ==> give many iterations to ensure convergence!
def finalizeSampling(self):
GibbsSamplerVariable.finalizeSampling(self)
smplHRF = self.samplerEngine.getVariable('hrf')
# Correct sign ambiguity :
sign_error = smplHRF.detectSignError()
pyhrf.verbose(2, 'sign error - Flipping nrls')
self.finalValue_sign_corr = self.finalValue * (1-2*sign_error)
# Correct hrf*nrl scale ambiguity :
scaleF = smplHRF.getScaleFactor()
# Use HRF amplitude :
pyhrf.verbose(3, 'scaleF=%1.2g' %scaleF)
pyhrf.verbose(3, 'self.finalValue : %1.2g - %1.2g' \
%(self.finalValue.min(), self.finalValue.max()))
self.finalValueScaleCorr = self.finalValue * scaleF
def getOutputs(self):
#outputs = GibbsSamplerVariable.getOutputs(self)
cn = self.dataInput.cNames
sn = self.dataInput.sNames
outputs = {}
an = ['session', 'condition', 'voxel']
if self.meanHistory is not None:
outName = self.name+'_pm_history'
if hasattr(self,'obsHistoryIts'):
axes_domains = {'iteration': self.obsHistoryIts}
else:
axes_domains = {}
axes_domains.update(self.axes_domains)
axes_names = ['iteration'] + an
outputs[outName] = xndarray(self.meanHistory,
axes_names=axes_names,
axes_domains=axes_domains,
value_label=self.value_label)
if hasattr(self, 'smplHistory') and self.smplHistory is not None:
axes_names = ['iteration'] + an
outName = self.name+'_smpl_history'
if hasattr(self,'smplHistoryIts'):
axes_domains = {'iteration': self.smplHistoryIts}
else:
axes_domains = {}
axes_domains.update(self.axes_domains)
outputs[outName] = xndarray(self.smplHistory,
axes_names=axes_names,
axes_domains=axes_domains,
value_label=self.value_label)
pyhrf.verbose(4, '%s final value:' %self.name)
pyhrf.verbose.printNdarray(4, self.finalValue)
if 1 and hasattr(self, 'error'):
err = self.error**.5
else:
err = None
c = xndarray(self.finalValue,
axes_names=self.axes_names,
axes_domains=self.axes_domains,
value_label=self.value_label)
outputs[self.name+'_pm'] = c
axes_names = ['voxel']
roi_lab_vol = np.zeros(self.nbVox, dtype=np.int32) + \
self.dataInput.roiId
outputs['roi_mapping'] = xndarray(roi_lab_vol, axes_names=axes_names,
value_label='ROI')
if pyhrf.__usemode__ == pyhrf.DEVEL:
if hasattr(self, 'finalValue_sign_corr'):
outputs['nrl_sign_corr'] = xndarray(self.finalValue_sign_corr,
axes_names=self.axes_names,
axes_domains=self.axes_domains,
value_label=self.value_label)
axes_names = ['session', 'condition', 'voxel']
axes_domains = {'condition' : cn, 'session' : sn}
if self.dataInput.simulData is not None:
#trueNrls = self.dataInput.simulData.nrls.data
trueNrls = self.trueValue
if trueNrls.shape == self.finalValue.shape:
axes_names = ['session', 'condition', 'voxel']
ad = {'condition':cn, 'session' : sn}
relErrorNrls = abs(trueNrls - self.finalValue)
outputs['nrl_pm_error'] = xndarray(relErrorNrls,
axes_names=axes_names,
axes_domains=ad)
axes_names = ['session', 'condition']
nt = (trueNrls.astype(np.float32) - \
self.finalValue.astype(np.float32))**2
outputs['nrl_pm_rmse'] = xndarray(nt.mean(2),
axes_names=axes_names,
axes_domains=ad)
if 0:
axes_names = ['type','session', 'time', 'voxel']
outputs['ysignals'] = xndarray(np.array([self.dataInput.varMBY,self.varYbar,self.sumaXh]),
axes_names=axes_names,
axes_domains={'type':['Y','Ybar','sumaXh']})
axes_names = ['session', 'time', 'voxel']
outputs['ytilde'] = xndarray(self.varYtilde,
axes_names=axes_names,)
outputs['ybar'] = xndarray(self.varYbar,
axes_names=axes_names,)
outputs['sumaXh'] = xndarray(self.sumaXh,
axes_names=axes_names,)
outputs['mby'] = xndarray(np.array(self.dataInput.varMBY),
axes_names=axes_names,)
return outputs
class Variance_GaussianNRL_Multi_Sess(xmlio.XMLParamDrivenClass, GibbsSamplerVariable):
'''
Sampler for the variance of the session-specific NRLs around the
across-session mean NRLs (Gaussian session model).
'''
P_VAL_INI = 'initialValue'
P_SAMPLE_FLAG = 'sampleFlag'
P_USE_TRUE_VALUE = 'useTrueValue'
defaultParameters = {
P_USE_TRUE_VALUE : False,
P_VAL_INI : np.array([1.]),
P_SAMPLE_FLAG : False, #By default, beta>0 -> SMM
}
if pyhrf.__usemode__ == pyhrf.ENDUSER:
parametersToShow = [P_USE_TRUE_VALUE]
def __init__(self, parameters=None, xmlHandler=NumpyXMLHandler(),
xmlLabel=None, xmlComment=None):
#TODO : comment
xmlio.XMLParamDrivenClass.__init__(self, parameters, xmlHandler,
xmlLabel, xmlComment)
sampleFlag = self.parameters[self.P_SAMPLE_FLAG]
valIni = self.parameters[self.P_VAL_INI]
useTrueVal = self.parameters[self.P_USE_TRUE_VALUE]
GibbsSamplerVariable.__init__(self, 'variance_nrls_by_session', valIni=valIni,
useTrueValue=useTrueVal,
sampleFlag=sampleFlag)
def linkToData(self, dataInput):
self.dataInput = dataInput
self.nbConditions = self.dataInput.nbConditions
self.nbVoxels = self.dataInput.nbVoxels
self.nbSessions = self.dataInput.nbSessions
if dataInput.simulData is not None:
#self.trueValue = np.array(np.array([dataInput.simulData[s]['nrls_session'] for s in xrange(self.nbSessions)]).var(0))
self.trueValue = np.array([dataInput.simulData[0]['var_sess']])
def checkAndSetInitValue(self, variables):
if self.useTrueValue:
if self.trueValue is None:
raise Exception('Needed a true value for %s init but '\
'None defined' %self.name)
else:
self.currentValue = self.trueValue.astype(np.float64)
def sampleNextInternal(self, variables):
nrls = variables[self.samplerEngine.I_NRLS_SESS].currentValue
nrlsBAR = variables[self.samplerEngine.I_NRLS_BAR].currentValue
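# NOTE: conjugate update for the between-session NRL variance: given the sum
# of squared deviations of the session NRLs from their mean, the posterior
# appears to be inverse gamma with shape (S*M*J - 1)/2 and rate 0.5*sum,
# drawn below as 1/np.random.gamma(alpha, 1/beta_g).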
sum_s_j_m=0
for s in xrange(self.nbSessions):
for m in xrange(self.nbConditions):
for j in xrange(self.nbVoxels):
sum_s_j_m += (nrls[s][m][j] - nrlsBAR[m][j])**2
alpha = (self.nbSessions*self.nbConditions*self.nbVoxels-1)/2.
beta_g = 0.5*sum_s_j_m
self.currentValue[0] = 1.0/np.random.gamma(alpha, 1/beta_g)
#self.currentValue.astype(np.float64)
#def sampleNextAlt(self, variables):
class BiGaussMixtureParamsSamplerWithRelVar_OLD(BiGaussMixtureParamsSampler):
defaultParameters = copyModule.deepcopy(BiGaussMixtureParamsSampler.defaultParameters)
parametersToShow = copyModule.deepcopy(BiGaussMixtureParamsSampler.parametersToShow)
def __init__(self, parameters=None, xmlHandler=NumpyXMLHandler(),
xmlLabel=None, xmlComment=None):
BiGaussMixtureParamsSampler.__init__(self, parameters, xmlHandler, xmlLabel, xmlComment)
def computeWithProperPriorsWithRelVar(self, nrlsj, j, cardCIj, cardCAj, wj):
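# NOTE: when the relevance variable wj is 1 the usual class-wise conjugate
# updates are used; when wj is 0 the condition is treated as irrelevant, all
# voxels contribute to the inactive-class variance and the active-class
# parameters are drawn from their priors only.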
if(wj):
if cardCIj > 1: # If we have only one voxel inactive we can't compute inactive variance
nu0j = .5*np.dot(self.nrlCI[j], self.nrlCI[j])
varCIj = 1.0/np.random.gamma(.5*cardCIj + self.varCIPrAlpha,
1/(nu0j + self.varCIPrBeta))
else :
pyhrf.verbose(6,'using only hyper priors for CI (empty class) ...')
varCIj = 1.0/np.random.gamma(self.varCIPrAlpha, 1/self.varCIPrBeta)
if cardCAj > 1:
eta1j = np.mean(self.nrlCA[j])
nrlCACentered = self.nrlCA[j] - self.currentValue[self.I_MEAN_CA,j]#eta1j
nu1j = .5 * np.dot(nrlCACentered, nrlCACentered)
#r = np.random.gamma(0.5*(cardCAj-1),2/nu1j)
varCAj = 1.0/np.random.gamma(0.5*cardCAj + self.varCAPrAlpha,
1/(nu1j + self.varCAPrBeta))
else :
pyhrf.verbose(6,'using only hyper priors for CA (empty class) ...')
eta1j = 0.0
varCAj = 1.0/np.random.gamma(self.varCAPrAlpha, 1/self.varCAPrBeta)
invVarLikelihood = (cardCAj+0.)/varCAj
meanCAVarAPost = 1/(invVarLikelihood + 1/self.meanCAPrVar)
rPrMV = self.meanCAPrMean/self.meanCAPrVar
meanCAMeanAPost = meanCAVarAPost * (eta1j*invVarLikelihood+rPrMV)
meanCAj = np.random.normal(meanCAMeanAPost, meanCAVarAPost**0.5)
else:
nu0j = .5*np.dot(nrlsj, nrlsj)
varCIj = 1.0/np.random.gamma(.5*self.nbVox + self.varCIPrAlpha,
1/(nu0j + self.varCIPrBeta))
varCAj = 1.0/np.random.gamma(self.varCAPrAlpha, 1/self.varCAPrBeta)
meanCAj = np.random.normal(self.meanCAPrMean, self.meanCAPrVar**0.5)
return varCIj,meanCAj,varCAj
def sampleNextInternal(self, variables):
#TODO : comment
nrlsSmpl = variables[self.samplerEngine.I_NRLS]
cardCA = nrlsSmpl.cardClass[self.L_CA,:]
cardCI = nrlsSmpl.cardClass[self.L_CI,:]
w = variables[self.samplerEngine.I_W].currentValue
for j in xrange(self.nbConditions):
vICI = nrlsSmpl.voxIdx[nrlsSmpl.L_CI][j]
vICA = nrlsSmpl.voxIdx[nrlsSmpl.L_CA][j]
self.nrlCI[j] = nrlsSmpl.currentValue[j, vICI]
self.nrlCA[j] = nrlsSmpl.currentValue[j, vICA]
for j in xrange(self.nbConditions):
#for j in np.random.permutation(self.nbConditions):
if self.hyperPriorFlag:
varCIj,meanCAj,varCAj = self.computeWithProperPriorsWithRelVar(nrlsSmpl.currentValue[j,:], j, cardCI[j],
cardCA[j], w[j])
else:
raise Exception('Prior distributions of mixture parameters should be Proper NOT Jeffrey')
# No Jeffrey Prior, it's complicated with relevance variable
self.currentValue[self.I_VAR_CI, j] = varCIj
self.currentValue[self.I_MEAN_CA, j] = meanCAj #absolute(meanCAj)
self.currentValue[self.I_VAR_CA, j] = varCAj
pyhrf.verbose(5, 'varCI,%d=%f'%(j,self.currentValue[self.I_VAR_CI,j]))
pyhrf.verbose(5, 'meanCA,%d=%f'%(j,self.currentValue[self.I_MEAN_CA,j]))
pyhrf.verbose(5, 'varCA,%d = %f'%(j,self.currentValue[self.I_VAR_CA,j]))
class BiGaussMixtureParamsSamplerWithRelVar(BiGaussMixtureParamsSampler):
defaultParameters = copyModule.deepcopy(BiGaussMixtureParamsSampler.defaultParameters)
parametersToShow = copyModule.deepcopy(BiGaussMixtureParamsSampler.parametersToShow)
def __init__(self, parameters=None, xmlHandler=NumpyXMLHandler(),
xmlLabel=None, xmlComment=None):
BiGaussMixtureParamsSampler.__init__(self, parameters, xmlHandler, xmlLabel, xmlComment)
def computeWithProperPriorsWithRelVar(self, nrlsj, j, cardCIj, cardCAj, wj):
#if j ==1:
#print 'NBInactvox =',cardCIj,', NBActVox =',cardCAj
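# NOTE: in this version each parameter is drawn as a wj-weighted combination
# of two draws: one for the condition being irrelevant (wj=0 term: all voxels
# pooled into the inactive class for varCI, priors only for the active-class
# parameters) and one using the class statistics (wj=1 term), rather than
# switching between branches.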
if cardCIj > 1: # If we have only one voxel inactive we can't compute inactive variance
A0 = self.varCIPrAlpha + 0.5*self.nbVox
A1 = self.varCIPrAlpha + 0.5*cardCIj
B0 = self.varCIPrBeta + 0.5*np.dot(nrlsj, nrlsj)
B1 = self.varCIPrBeta + 0.5*np.dot(self.nrlCI[j], self.nrlCI[j])
varCIj = (1 - wj) * (1.0/np.random.gamma(A0,1/B0)) + wj*(1.0/np.random.gamma(A1,1/B1))
#if j==1:
#print 'A1 =',A1,', B1 =',B1,', v0 =',varCIj
else :
pyhrf.verbose(6,'using only hyper priors for CI (empty class) ...')
varCIj = 1.0/np.random.gamma(self.varCIPrAlpha, 1/self.varCIPrBeta)
if cardCAj > 1:
eta1j = np.mean(self.nrlCA[j])
nrlCACentered = self.nrlCA[j] - self.currentValue[self.I_MEAN_CA,j]#eta1j
nu1j = .5 * np.dot(nrlCACentered, nrlCACentered)
A0 = self.varCAPrAlpha
A1 = self.varCAPrAlpha + 0.5*cardCAj
B0 = self.varCAPrBeta
B1 = self.varCAPrBeta + nu1j
#r = np.random.gamma(0.5*(cardCAj-1),2/nu1j)
varCAj = (1 - wj) * (1.0/np.random.gamma(A0,1/B0)) + wj*(1.0/np.random.gamma(A1,1/B1))
#if j==1:
#print 'A1 =',A1,', B1 =',B1,', v1 =',varCAj
else :
pyhrf.verbose(6,'using only hyper priors for CA (empty class) ...')
eta1j = 0.0
varCAj = 1.0/np.random.gamma(self.varCAPrAlpha, 1/self.varCAPrBeta)
invVarLikelihood = (cardCAj+0.)/varCAj
meanCAVarAPost = 1/(invVarLikelihood + 1/self.meanCAPrVar)
rPrMV = self.meanCAPrMean/self.meanCAPrVar
meanCAMeanAPost = meanCAVarAPost * (eta1j*invVarLikelihood+rPrMV)
meanCAj = (1 - wj) * np.random.normal(self.meanCAPrMean,self.meanCAPrVar**0.5) + wj * np.random.normal(meanCAMeanAPost,meanCAVarAPost**0.5)
#print 'Cond =',j,', v0 =',varCIj,', v1 =',varCAj,', m1 =',meanCAj
return varCIj,meanCAj,varCAj
def sampleNextInternal(self, variables):
#TODO : comment
nrlsSmpl = variables[self.samplerEngine.I_NRLS]
cardCA = nrlsSmpl.cardClass[self.L_CA,:]
cardCI = nrlsSmpl.cardClass[self.L_CI,:]
w = variables[self.samplerEngine.I_W].currentValue
for j in xrange(self.nbConditions):
vICI = nrlsSmpl.voxIdx[nrlsSmpl.L_CI][j]
vICA = nrlsSmpl.voxIdx[nrlsSmpl.L_CA][j]
self.nrlCI[j] = nrlsSmpl.currentValue[j, vICI]
self.nrlCA[j] = nrlsSmpl.currentValue[j, vICA]
for j in xrange(self.nbConditions):
#for j in np.random.permutation(self.nbConditions):
if self.hyperPriorFlag:
varCIj,meanCAj,varCAj = self.computeWithProperPriorsWithRelVar(nrlsSmpl.currentValue[j,:], j, cardCI[j],
cardCA[j], w[j])
else:
raise Exception('Prior distributions of mixture parameters should be Proper NOT Jeffrey')
# No Jeffrey Prior, it's complicated with relevance variable
self.currentValue[self.I_VAR_CI, j] = varCIj
self.currentValue[self.I_MEAN_CA, j] = meanCAj #absolute(meanCAj)
self.currentValue[self.I_VAR_CA, j] = varCAj
pyhrf.verbose(5, 'varCI,%d=%f'%(j,self.currentValue[self.I_VAR_CI,j]))
pyhrf.verbose(5, 'meanCA,%d=%f'%(j,self.currentValue[self.I_MEAN_CA,j]))
pyhrf.verbose(5, 'varCA,%d = %f'%(j,self.currentValue[self.I_VAR_CA,j]))
class MixtureWeightsSampler(xmlio.XMLParamDrivenClass, GibbsSamplerVariable):
"""
#TODO : comment
"""
P_VAL_INI = 'initialValue'
P_SAMPLE_FLAG = 'sampleFlag'
defaultParameters = {
P_VAL_INI : None,
P_SAMPLE_FLAG : False, #By default, beta>0 -> SMM
}
if pyhrf.__usemode__ == pyhrf.ENDUSER:
parametersToShow = []
def __init__(self, parameters=None, xmlHandler=NumpyXMLHandler(),
xmlLabel=None, xmlComment=None):
#TODO : comment
xmlio.XMLParamDrivenClass.__init__(self, parameters, xmlHandler,
xmlLabel, xmlComment)
sampleFlag = self.parameters[self.P_SAMPLE_FLAG]
valIni = self.parameters[self.P_VAL_INI]
GibbsSamplerVariable.__init__(self, 'mixt_weights', valIni=valIni,
sampleFlag=sampleFlag)
def linkToData(self, dataInput):
self.dataInput = dataInput
self.nbConditions = self.dataInput.nbConditions
self.nbVoxels = self.dataInput.nbVoxels
def checkAndSetInitValue(self, variables):
self.nbClasses = self.samplerEngine.getVariable('nrl').nbClasses
if self.currentValue is None:
self.currentValue = np.zeros( (self.nbClasses, self.nbConditions),
dtype = float)+0.5
if 0 and not self.sampleFlag and self.dataInput.simulData != None :
sn = self.dataInput.simulData.nrls
for c in xrange(self.nbClasses):
#print 'self.currentValue[c,:]:', self.currentValue[c,:].shape
#print '(sn.labels==c).sum(axis=1,dtype=float):', (sn.labels==c).sum(axis=1,dtype=float).shape
#print 'sn.labels :', sn.labels
self.currentValue[c,:] = (sn.labels==c).sum(axis=1,dtype=float) \
/self.nbVoxels
def sampleNextInternal(self, variables):
#TODO : comment
##print '- Sampling MixtWeights ...'
#self.currentValue = np.zeros(self.nbConditions, dtype=float)
nrlsSmpl = self.samplerEngine.getVariable('nrl')
lca = nrlsSmpl.L_CA
lci = nrlsSmpl.L_CI
card = nrlsSmpl.cardClass
nbv = self.nbVoxels
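# NOTE: for two classes the active-class weight appears to be drawn from its
# Beta posterior, Beta(card_active + 1.5, nbVox - card_active + 1.5), i.e. a
# Beta(1.5, 1.5) prior on the activation probability combined with the voxel
# class counts; the inactive weight is its complement.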
for j in xrange(self.nbConditions):
if self.nbClasses == 2:
self.currentValue[lca,j] = np.random.beta(card[lca,j]+1.5,
nbv-card[lca,j]+1.5)
self.currentValue[lci,j] = 1 - self.currentValue[lca,j]
elif self.nbClasses == 3:
#TODO : sampling with dirichlet process
raise NotImplementedError()
##print '- Done sampling MixtWeights ...\n'
assert (self.currentValue.sum(0) == 1.).all()
def getOutputs(self):
outputs = {}
if pyhrf.__usemode__ == pyhrf.DEVEL:
outputs = GibbsSamplerVariable.getOutputs(self)
return outputs
|
philouc/pyhrf
|
python/pyhrf/jde/nrl/bigaussian.py
|
Python
|
gpl-3.0
| 196,426
|
[
"Gaussian"
] |
5cd1bb532d58dd82f5c71ddb801716572565b16742d485338c2edfc791aca276
|
"""Shared functionality for interacting with Galaxy.
"""
|
lpantano/bcbio-nextgen
|
bcbio/galaxy/__init__.py
|
Python
|
mit
| 57
|
[
"Galaxy"
] |
f758cbe85bcd4b79197060c229afc94861b3772bdfabdc14ce41a7937891bb95
|
"""Hilbert spaces for quantum mechanics.
Authors:
* Brian Granger
* Matt Curry
"""
from __future__ import print_function, division
from sympy import Basic, Interval, oo, sympify
from sympy.core.compatibility import u, range
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.qexpr import QuantumError
from sympy.core.compatibility import reduce
__all__ = [
'HilbertSpaceError',
'HilbertSpace',
'ComplexSpace',
'L2',
'FockSpace'
]
#-----------------------------------------------------------------------------
# Main objects
#-----------------------------------------------------------------------------
class HilbertSpaceError(QuantumError):
pass
#-----------------------------------------------------------------------------
# Main objects
#-----------------------------------------------------------------------------
class HilbertSpace(Basic):
"""An abstract Hilbert space for quantum mechanics.
In short, a Hilbert space is an abstract vector space equipped with an
inner product and complete with respect to the induced norm [1]_.
Examples
========
>>> from sympy.physics.quantum.hilbert import HilbertSpace
>>> hs = HilbertSpace()
>>> hs
H
References
==========
.. [1] http://en.wikipedia.org/wiki/Hilbert_space
"""
def __new__(cls):
obj = Basic.__new__(cls)
return obj
@property
def dimension(self):
"""Return the Hilbert dimension of the space."""
raise NotImplementedError('This Hilbert space has no dimension.')
def __add__(self, other):
return DirectSumHilbertSpace(self, other)
def __radd__(self, other):
return DirectSumHilbertSpace(other, self)
def __mul__(self, other):
return TensorProductHilbertSpace(self, other)
def __rmul__(self, other):
return TensorProductHilbertSpace(other, self)
def __pow__(self, other, mod=None):
if mod is not None:
raise ValueError('The third argument to __pow__ is not supported \
for Hilbert spaces.')
return TensorPowerHilbertSpace(self, other)
def __contains__(self, other):
"""Is the operator or state in this Hilbert space.
This is checked by comparing the classes of the Hilbert spaces, not
the instances. This is to allow Hilbert Spaces with symbolic
dimensions.
"""
if other.hilbert_space.__class__ == self.__class__:
return True
else:
return False
def _sympystr(self, printer, *args):
return u('H')
def _pretty(self, printer, *args):
ustr = u('\N{LATIN CAPITAL LETTER H}')
return prettyForm(ustr)
def _latex(self, printer, *args):
return r'\mathcal{H}'
class ComplexSpace(HilbertSpace):
"""Finite dimensional Hilbert space of complex vectors.
The elements of this Hilbert space are n-dimensional complex valued
vectors with the usual inner product that takes the complex conjugate
of the vector on the right.
A classic example of this type of Hilbert space is spin-1/2, which is
``ComplexSpace(2)``. Generalizing to spin-s, the space is
``ComplexSpace(2*s+1)``. Quantum computing with N qubits is done with the
direct product space ``ComplexSpace(2)**N``.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.quantum.hilbert import ComplexSpace
>>> c1 = ComplexSpace(2)
>>> c1
C(2)
>>> c1.dimension
2
>>> n = symbols('n')
>>> c2 = ComplexSpace(n)
>>> c2
C(n)
>>> c2.dimension
n
"""
def __new__(cls, dimension):
dimension = sympify(dimension)
r = cls.eval(dimension)
if isinstance(r, Basic):
return r
obj = Basic.__new__(cls, dimension)
return obj
@classmethod
def eval(cls, dimension):
if len(dimension.atoms()) == 1:
if not (dimension.is_Integer and dimension > 0 or dimension is oo
or dimension.is_Symbol):
raise TypeError('The dimension of a ComplexSpace can only'
'be a positive integer, oo, or a Symbol: %r'
% dimension)
else:
for dim in dimension.atoms():
if not (dim.is_Integer or dim is oo or dim.is_Symbol):
raise TypeError('The dimension of a ComplexSpace can only'
' contain integers, oo, or a Symbol: %r'
% dim)
@property
def dimension(self):
return self.args[0]
def _sympyrepr(self, printer, *args):
return "%s(%s)" % (self.__class__.__name__,
printer._print(self.dimension, *args))
def _sympystr(self, printer, *args):
return "C(%s)" % printer._print(self.dimension, *args)
def _pretty(self, printer, *args):
ustr = u('\N{LATIN CAPITAL LETTER C}')
pform_exp = printer._print(self.dimension, *args)
pform_base = prettyForm(ustr)
return pform_base**pform_exp
def _latex(self, printer, *args):
return r'\mathcal{C}^{%s}' % printer._print(self.dimension, *args)
class L2(HilbertSpace):
"""The Hilbert space of square integrable functions on an interval.
An L2 object takes in a single sympy Interval argument which represents
the interval its functions (vectors) are defined on.
Examples
========
>>> from sympy import Interval, oo
>>> from sympy.physics.quantum.hilbert import L2
>>> hs = L2(Interval(0,oo))
>>> hs
L2([0, oo))
>>> hs.dimension
oo
>>> hs.interval
[0, oo)
"""
def __new__(cls, interval):
if not isinstance(interval, Interval):
raise TypeError('L2 interval must be an Interval instance: %r'
% interval)
obj = Basic.__new__(cls, interval)
return obj
@property
def dimension(self):
return oo
@property
def interval(self):
return self.args[0]
def _sympyrepr(self, printer, *args):
return "L2(%s)" % printer._print(self.interval, *args)
def _sympystr(self, printer, *args):
return "L2(%s)" % printer._print(self.interval, *args)
def _pretty(self, printer, *args):
pform_exp = prettyForm(u('2'))
pform_base = prettyForm(u('L'))
return pform_base**pform_exp
def _latex(self, printer, *args):
interval = printer._print(self.interval, *args)
return r'{\mathcal{L}^2}\left( %s \right)' % interval
class FockSpace(HilbertSpace):
"""The Hilbert space for second quantization.
Technically, this Hilbert space is an infinite direct sum of direct
products of single particle Hilbert spaces [1]_. This is a mess, so we have
a class to represent it directly.
Examples
========
>>> from sympy.physics.quantum.hilbert import FockSpace
>>> hs = FockSpace()
>>> hs
F
>>> hs.dimension
oo
References
==========
.. [1] http://en.wikipedia.org/wiki/Fock_space
"""
def __new__(cls):
obj = Basic.__new__(cls)
return obj
@property
def dimension(self):
return oo
def _sympyrepr(self, printer, *args):
return "FockSpace()"
def _sympystr(self, printer, *args):
return "F"
def _pretty(self, printer, *args):
ustr = u('\N{LATIN CAPITAL LETTER F}')
return prettyForm(ustr)
def _latex(self, printer, *args):
return r'\mathcal{F}'
class TensorProductHilbertSpace(HilbertSpace):
"""A tensor product of Hilbert spaces [1]_.
The tensor product between Hilbert spaces is represented by the
operator ``*``. Products of the same Hilbert space will be combined into
tensor powers.
A ``TensorProductHilbertSpace`` object takes in an arbitrary number of
``HilbertSpace`` objects as its arguments. In addition, multiplication of
``HilbertSpace`` objects will automatically return this tensor product
object.
Examples
========
>>> from sympy.physics.quantum.hilbert import ComplexSpace, FockSpace
>>> from sympy import symbols
>>> c = ComplexSpace(2)
>>> f = FockSpace()
>>> hs = c*f
>>> hs
C(2)*F
>>> hs.dimension
oo
>>> hs.spaces
(C(2), F)
>>> c1 = ComplexSpace(2)
>>> n = symbols('n')
>>> c2 = ComplexSpace(n)
>>> hs = c1*c2
>>> hs
C(2)*C(n)
>>> hs.dimension
2*n
References
==========
.. [1] http://en.wikipedia.org/wiki/Hilbert_space#Tensor_products
"""
def __new__(cls, *args):
r = cls.eval(args)
if isinstance(r, Basic):
return r
obj = Basic.__new__(cls, *args)
return obj
@classmethod
def eval(cls, args):
"""Evaluates the direct product."""
new_args = []
recall = False
#flatten arguments
for arg in args:
if isinstance(arg, TensorProductHilbertSpace):
new_args.extend(arg.args)
recall = True
elif isinstance(arg, (HilbertSpace, TensorPowerHilbertSpace)):
new_args.append(arg)
else:
raise TypeError('Hilbert spaces can only be multiplied by \
other Hilbert spaces: %r' % arg)
#combine like arguments into direct powers
comb_args = []
prev_arg = None
for new_arg in new_args:
if prev_arg is not None:
if isinstance(new_arg, TensorPowerHilbertSpace) and \
isinstance(prev_arg, TensorPowerHilbertSpace) and \
new_arg.base == prev_arg.base:
prev_arg = new_arg.base**(new_arg.exp + prev_arg.exp)
elif isinstance(new_arg, TensorPowerHilbertSpace) and \
new_arg.base == prev_arg:
prev_arg = prev_arg**(new_arg.exp + 1)
elif isinstance(prev_arg, TensorPowerHilbertSpace) and \
new_arg == prev_arg.base:
prev_arg = new_arg**(prev_arg.exp + 1)
elif new_arg == prev_arg:
prev_arg = new_arg**2
else:
comb_args.append(prev_arg)
prev_arg = new_arg
elif prev_arg is None:
prev_arg = new_arg
comb_args.append(prev_arg)
if recall:
return TensorProductHilbertSpace(*comb_args)
elif len(comb_args) == 1:
return TensorPowerHilbertSpace(comb_args[0].base, comb_args[0].exp)
else:
return None
@property
def dimension(self):
arg_list = [arg.dimension for arg in self.args]
if oo in arg_list:
return oo
else:
return reduce(lambda x, y: x*y, arg_list)
@property
def spaces(self):
"""A tuple of the Hilbert spaces in this tensor product."""
return self.args
def _spaces_printer(self, printer, *args):
spaces_strs = []
for arg in self.args:
s = printer._print(arg, *args)
if isinstance(arg, DirectSumHilbertSpace):
s = '(%s)' % s
spaces_strs.append(s)
return spaces_strs
def _sympyrepr(self, printer, *args):
spaces_reprs = self._spaces_printer(printer, *args)
return "TensorProductHilbertSpace(%s)" % ','.join(spaces_reprs)
def _sympystr(self, printer, *args):
spaces_strs = self._spaces_printer(printer, *args)
return '*'.join(spaces_strs)
def _pretty(self, printer, *args):
length = len(self.args)
pform = printer._print('', *args)
for i in range(length):
next_pform = printer._print(self.args[i], *args)
if isinstance(self.args[i], (DirectSumHilbertSpace,
TensorProductHilbertSpace)):
next_pform = prettyForm(
*next_pform.parens(left='(', right=')')
)
pform = prettyForm(*pform.right(next_pform))
if i != length - 1:
if printer._use_unicode:
pform = prettyForm(*pform.right(u(' ') + u('\N{N-ARY CIRCLED TIMES OPERATOR}') + u(' ')))
else:
pform = prettyForm(*pform.right(' x '))
return pform
def _latex(self, printer, *args):
length = len(self.args)
s = ''
for i in range(length):
arg_s = printer._print(self.args[i], *args)
if isinstance(self.args[i], (DirectSumHilbertSpace,
TensorProductHilbertSpace)):
arg_s = r'\left(%s\right)' % arg_s
s = s + arg_s
if i != length - 1:
s = s + r'\otimes '
return s
class DirectSumHilbertSpace(HilbertSpace):
"""A direct sum of Hilbert spaces [1]_.
This class uses the ``+`` operator to represent direct sums between
different Hilbert spaces.
A ``DirectSumHilbertSpace`` object takes in an arbitrary number of
``HilbertSpace`` objects as its arguments. Also, addition of
``HilbertSpace`` objects will automatically return a direct sum object.
Examples
========
>>> from sympy.physics.quantum.hilbert import ComplexSpace, FockSpace
>>> from sympy import symbols
>>> c = ComplexSpace(2)
>>> f = FockSpace()
>>> hs = c+f
>>> hs
C(2)+F
>>> hs.dimension
oo
>>> list(hs.spaces)
[C(2), F]
References
==========
.. [1] http://en.wikipedia.org/wiki/Hilbert_space#Direct_sums
"""
def __new__(cls, *args):
r = cls.eval(args)
if isinstance(r, Basic):
return r
obj = Basic.__new__(cls, *args)
return obj
@classmethod
def eval(cls, args):
"""Evaluates the direct product."""
new_args = []
recall = False
#flatten arguments
for arg in args:
if isinstance(arg, DirectSumHilbertSpace):
new_args.extend(arg.args)
recall = True
elif isinstance(arg, HilbertSpace):
new_args.append(arg)
else:
raise TypeError('Hilbert spaces can only be summed with other \
Hilbert spaces: %r' % arg)
if recall:
return DirectSumHilbertSpace(*new_args)
else:
return None
@property
def dimension(self):
arg_list = [arg.dimension for arg in self.args]
if oo in arg_list:
return oo
else:
return reduce(lambda x, y: x + y, arg_list)
@property
def spaces(self):
"""A tuple of the Hilbert spaces in this direct sum."""
return self.args
def _sympyrepr(self, printer, *args):
spaces_reprs = [printer._print(arg, *args) for arg in self.args]
return "DirectSumHilbertSpace(%s)" % ','.join(spaces_reprs)
def _sympystr(self, printer, *args):
spaces_strs = [printer._print(arg, *args) for arg in self.args]
return '+'.join(spaces_strs)
def _pretty(self, printer, *args):
length = len(self.args)
pform = printer._print('', *args)
for i in range(length):
next_pform = printer._print(self.args[i], *args)
if isinstance(self.args[i], (DirectSumHilbertSpace,
TensorProductHilbertSpace)):
next_pform = prettyForm(
*next_pform.parens(left='(', right=')')
)
pform = prettyForm(*pform.right(next_pform))
if i != length - 1:
if printer._use_unicode:
pform = prettyForm(*pform.right(u(' ') + u('\N{CIRCLED PLUS}') + u(' ')))
else:
pform = prettyForm(*pform.right(' + '))
return pform
def _latex(self, printer, *args):
length = len(self.args)
s = ''
for i in range(length):
arg_s = printer._print(self.args[i], *args)
if isinstance(self.args[i], (DirectSumHilbertSpace,
TensorProductHilbertSpace)):
arg_s = r'\left(%s\right)' % arg_s
s = s + arg_s
if i != length - 1:
s = s + r'\oplus '
return s
class TensorPowerHilbertSpace(HilbertSpace):
"""An exponentiated Hilbert space [1]_.
Tensor powers (repeated tensor products) are represented by the
operator ``**``. Identical Hilbert spaces that are multiplied together
will be automatically combined into a single tensor power object.
Any Hilbert space, product, or sum may be raised to a tensor power. The
``TensorPowerHilbertSpace`` takes two arguments: the Hilbert space; and the
tensor power (number).
Examples
========
>>> from sympy.physics.quantum.hilbert import ComplexSpace, FockSpace
>>> from sympy import symbols
>>> n = symbols('n')
>>> c = ComplexSpace(2)
>>> hs = c**n
>>> hs
C(2)**n
>>> hs.dimension
2**n
>>> c = ComplexSpace(2)
>>> c*c
C(2)**2
>>> f = FockSpace()
>>> c*f*f
C(2)*F**2
References
==========
.. [1] http://en.wikipedia.org/wiki/Hilbert_space#Tensor_products
"""
def __new__(cls, *args):
r = cls.eval(args)
if isinstance(r, Basic):
return r
return Basic.__new__(cls, *r)
@classmethod
def eval(cls, args):
new_args = args[0], sympify(args[1])
exp = new_args[1]
#simplify hs**1 -> hs
if exp == 1:
return args[0]
#simplify hs**0 -> 1
if exp == 0:
return sympify(1)
#check (and allow) for hs**(x+42+y...) case
if len(exp.atoms()) == 1:
if not (exp.is_Integer and exp >= 0 or exp.is_Symbol):
raise ValueError('Hilbert spaces can only be raised to \
positive integers or Symbols: %r' % exp)
else:
for power in exp.atoms():
if not (power.is_Integer or power.is_Symbol):
raise ValueError('Tensor powers can only contain integers \
or Symbols: %r' % power)
return new_args
@property
def base(self):
return self.args[0]
@property
def exp(self):
return self.args[1]
@property
def dimension(self):
if self.base.dimension == oo:
return oo
else:
return self.base.dimension**self.exp
def _sympyrepr(self, printer, *args):
return "TensorPowerHilbertSpace(%s,%s)" % (printer._print(self.base,
*args), printer._print(self.exp, *args))
def _sympystr(self, printer, *args):
return "%s**%s" % (printer._print(self.base, *args),
printer._print(self.exp, *args))
def _pretty(self, printer, *args):
pform_exp = printer._print(self.exp, *args)
if printer._use_unicode:
pform_exp = prettyForm(*pform_exp.left(prettyForm(u('\N{N-ARY CIRCLED TIMES OPERATOR}'))))
else:
pform_exp = prettyForm(*pform_exp.left(prettyForm('x')))
pform_base = printer._print(self.base, *args)
return pform_base**pform_exp
def _latex(self, printer, *args):
base = printer._print(self.base, *args)
exp = printer._print(self.exp, *args)
return r'{%s}^{\otimes %s}' % (base, exp)
|
kaichogami/sympy
|
sympy/physics/quantum/hilbert.py
|
Python
|
bsd-3-clause
| 19,528
|
[
"Brian"
] |
e847ddf095871b51d249a2c09625b7355d449ce72c1a05956a4a2a308a91617b
|
import os
import sys
from time import ctime, time
import numpy as np
from ase.units import Hartree, Bohr
from ase.utils import prnt
from scipy.special.orthogonal import p_roots
from scipy.special import sici
from gpaw import GPAW
from gpaw.blacs import BlacsGrid, Redistributor
from gpaw.utilities.blas import gemmdot, axpy
from gpaw.wavefunctions.pw import PWDescriptor
from gpaw.kpt_descriptor import KPointDescriptor
from gpaw.xc.rpa import RPACorrelation
from gpaw.io.tar import Writer, Reader
from gpaw.fd_operators import Gradient, Laplace
import gpaw.mpi as mpi
class FXCCorrelation(RPACorrelation):
def __init__(self, calc, xc='RPA', filename=None,
skip_gamma=False, qsym=True, nlambda=8,
nfrequencies=16, frequency_max=800.0, frequency_scale=2.0,
frequencies=None, weights=None, density_cut=1.e-6,
wcomm=None, chicomm=None, world=mpi.world,
unit_cells=None, tag=None,
txt=sys.stdout):
RPACorrelation.__init__(self, calc, xc=xc, filename=filename,
skip_gamma=skip_gamma, qsym=qsym,
nfrequencies=nfrequencies, nlambda=nlambda,
frequency_max=frequency_max,
frequency_scale=frequency_scale,
frequencies=frequencies, weights=weights,
wcomm=wcomm, chicomm=chicomm, world=world,
txt=txt)
self.l_l, self.weight_l = p_roots(nlambda)
self.l_l = (self.l_l + 1.0) * 0.5
self.weight_l *= 0.5
self.xc = xc
self.density_cut = density_cut
if unit_cells is None:
unit_cells = self.calc.wfs.kd.N_c
self.unit_cells = unit_cells
if tag is None:
tag = self.calc.atoms.get_chemical_formula(mode='hill')
self.tag = tag
def calculate(self, ecut):
if self.xc != 'RPA':
if isinstance(ecut, (float, int)):
self.ecut_max = ecut
else:
self.ecut_max = max(ecut)
if not os.path.isfile('fhxc_%s_%s_%s_0.gpw'
% (self.tag, self.xc, self.ecut_max)):
kernel = Kernel(self.calc, self.xc, self.ibzq_qc,
self.fd, self.unit_cells, self.density_cut,
self.ecut_max, self.tag)
kernel.calculate_fhxc()
del kernel
else:
prnt('%s kernel already calculated' % self.xc, file=self.fd)
prnt(file=self.fd)
if self.calc.wfs.nspins == 1:
spin = False
else:
spin = True
e = RPACorrelation.calculate(self, ecut, spin=spin)
return e
def calculate_q(self, chi0, pd,
chi0_swGG, chi0_swxvG, chi0_swvv,
Q_aGii, m1, m2, cut_G):
if chi0_swxvG is None:
chi0_swxvG = range(2) # Not used
chi0_swvv = range(2) # Not used
chi0._calculate(pd, chi0_swGG[0], chi0_swxvG[0], chi0_swvv[0],
Q_aGii, m1, m2, [0])
if len(chi0_swGG) == 2:
chi0._calculate(pd, chi0_swGG[1], chi0_swxvG[1], chi0_swvv[1],
Q_aGii, m1, m2, [1])
prnt('E_c(q) = ', end='', file=self.fd)
if not pd.kd.gamma:
e = self.calculate_energy(pd, chi0_swGG, cut_G)
prnt('%.3f eV' % (e * Hartree), file=self.fd)
self.fd.flush()
else:
e = 0.0
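# NOTE: at the Gamma point the q->0 limit appears to be handled by
# substituting the chi0 wings (chi0_swxvG) and head (chi0_swvv) into the
# G=0 row, column and element of chi0_swGG, then averaging the resulting
# energy over the three Cartesian directions.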
for v in range(3):
chi0_swGG[:, :, 0] = chi0_swxvG[:, :, 0, v]
chi0_swGG[:, :, :, 0] = chi0_swxvG[:, :, 1, v]
chi0_swGG[:, :, 0, 0] = chi0_swvv[:, :, v, v]
ev = self.calculate_energy(pd, chi0_swGG, cut_G)
e += ev
prnt('%.3f' % (ev * Hartree), end='', file=self.fd)
if v < 2:
prnt('/', end='', file=self.fd)
else:
prnt('eV', file=self.fd)
self.fd.flush()
e /= 3
return e
def calculate_energy(self, pd, chi0_swGG, cut_G):
"""Evaluate correlation energy from chi0 and the kernel fhxc"""
ibzq2_q = [np.dot(self.ibzq_qc[i] - pd.kd.bzk_kc[0],
self.ibzq_qc[i] - pd.kd.bzk_kc[0])
for i in range(len(self.ibzq_qc))]
qi = np.argsort(ibzq2_q)[0]
G_G = pd.G2_qG[0]**0.5 # |G+q|
if cut_G is not None:
G_G = G_G[cut_G]
nG = len(G_G)
ns = len(chi0_swGG)
if self.xc != 'RPA':
r = Reader('fhxc_%s_%s_%s_%s.gpw' %
(self.tag, self.xc, self.ecut_max, qi))
fv = r.get('fhxc_sGsG')
if cut_G is not None:
cut_sG = np.tile(cut_G, ns)
cut_sG[len(cut_G):] += len(fv) / ns
fv = fv.take(cut_sG, 0).take(cut_sG, 1)
for s1 in range(ns):
for s2 in range(ns):
m1 = s1 * nG
n1 = (s1 + 1) * nG
m2 = s2 * nG
n2 = (s2 + 1) * nG
fv[m1:n1, m2:n2] *= G_G * G_G[:, np.newaxis] / 4 / np.pi
if np.prod(self.unit_cells) > 1 and pd.kd.gamma:
m1 = s1 * nG
n1 = (s1 + 1) * nG
m2 = s2 * nG
n2 = (s2 + 1) * nG
fv[m1, m2:n2] = 0.0
fv[m1:n1, m2] = 0.0
fv[m1, m2] = 1.0
else:
fv = np.tile(np.eye(nG), (ns, ns))
if pd.kd.gamma:
G_G[0] = 1.0
e_w = []
j = 0
for chi0_sGG in np.swapaxes(chi0_swGG, 0, 1):
if cut_G is not None:
chi0_sGG = chi0_sGG.take(cut_G, 1).take(cut_G, 2)
chi0v = np.zeros((ns * nG, ns * nG), dtype=complex)
for s in range(ns):
m = s * nG
n = (s + 1) * nG
chi0v[m:n, m:n] = chi0_sGG[s] / G_G / G_G[:, np.newaxis]
chi0v *= 4 * np.pi
del chi0_sGG
e = 0.0
for l, weight in zip(self.l_l, self.weight_l):
chiv = np.linalg.solve(np.eye(nG * ns) -
l * np.dot(chi0v, fv), chi0v).real
for s1 in range(ns):
for s2 in range(ns):
m1 = s1 * nG
n1 = (s1 + 1) * nG
m2 = s2 * nG
n2 = (s2 + 1) * nG
chiv_s1s2 = chiv[m1:n1, m2:n2]
e -= np.trace(chiv_s1s2) * weight
e += np.trace(chi0v.real)
e_w.append(e)
E_w = np.zeros_like(self.omega_w)
self.wcomm.all_gather(np.array(e_w), E_w)
energy = np.dot(E_w, self.weight_w) / (2 * np.pi)
return energy
class Kernel:
def __init__(self, calc, xc, ibzq_qc, fd, unit_cells,
density_cut, ecut, tag):
self.calc = calc
self.gd = calc.density.gd
self.xc = xc
self.ibzq_qc = ibzq_qc
self.fd = fd
self.unit_cells = unit_cells
self.density_cut = density_cut
self.ecut = ecut
self.tag = tag
self.A_x = -(3 / 4.) * (3 / np.pi)**(1 / 3.)
self.n_g = calc.get_all_electron_density(gridrefinement=1)
self.n_g *= Bohr**3
if xc[-3:] == 'PBE':
nf_g = calc.get_all_electron_density(gridrefinement=2)
nf_g *= Bohr**3
gdf = self.gd.refine()
grad_v = [Gradient(gdf, v, n=1).apply for v in range(3)]
gradnf_vg = gdf.empty(3)
for v in range(3):
grad_v[v](nf_g, gradnf_vg[v])
self.gradn_vg = gradnf_vg[:, ::2, ::2, ::2]
qd = KPointDescriptor(self.ibzq_qc)
self.pd = PWDescriptor(ecut / Hartree, self.gd, complex, qd)
def calculate_fhxc(self):
prnt('Calculating %s kernel at %d eV cutoff' %
(self.xc, self.ecut), file=self.fd)
if self.xc[0] == 'r':
self.calculate_rkernel()
else:
assert self.xc[0] == 'A'
self.calculate_local_kernel()
def calculate_rkernel(self):
gd = self.gd
ng_c = gd.N_c
cell_cv = gd.cell_cv
icell_cv = 2 * np.pi * np.linalg.inv(cell_cv)
vol = np.linalg.det(cell_cv)
ns = self.calc.wfs.nspins
n_g = self.n_g # density on rough grid
fx_g = ns * self.get_fxc_g(n_g) # local exchange kernel
qc_g = (-4 * np.pi * ns / fx_g)**0.5 # cutoff functional
flocal_g = qc_g**3 * fx_g / (6 * np.pi**2) # ren. x-kernel for r=r'
Vlocal_g = 2 * qc_g / np.pi # ren. Hartree kernel for r=r'
ng = np.prod(ng_c) # number of grid points
r_vg = gd.get_grid_point_coordinates()
rx_g = r_vg[0].flatten()
ry_g = r_vg[1].flatten()
rz_g = r_vg[2].flatten()
prnt(' %d grid points and %d plane waves at the Gamma point' %
(ng, self.pd.ngmax), file=self.fd)
# Unit cells
R_Rv = []
weight_R = []
nR_v = self.unit_cells
nR = np.prod(nR_v)
for i in range(-nR_v[0] + 1, nR_v[0]):
for j in range(-nR_v[1] + 1, nR_v[1]):
for h in range(-nR_v[2] + 1, nR_v[2]):
R_Rv.append(i * cell_cv[0] +
j * cell_cv[1] +
h * cell_cv[2])
weight_R.append((nR_v[0] - abs(i)) *
(nR_v[1] - abs(j)) *
(nR_v[2] - abs(h)) / float(nR))
if nR > 1:
# With more than one unit cell, only the exchange kernel is
# calculated on the grid. The bare Coulomb kernel is added
# in the PW basis, and Vlocal_g keeps only the exchange part.
dv = self.calc.density.gd.dv
gc = (3 * dv / 4 / np.pi)**(1 / 3.)
Vlocal_g -= 2 * np.pi * gc**2 / dv
prnt(' Lattice point sampling: ' +
'(%s x %s x %s)^2 ' % (nR_v[0], nR_v[1], nR_v[2]) +
' Reduced to %s lattice points' % len(R_Rv), file=self.fd)
l_g_size = -(-ng // mpi.world.size)
l_g_range = range(mpi.world.rank * l_g_size,
min((mpi.world.rank+1) * l_g_size, ng))
fhxc_qsGr = {}
for iq in range(len(self.ibzq_qc)):
fhxc_qsGr[iq] = np.zeros((ns, len(self.pd.G2_qG[iq]),
len(l_g_range)), dtype=complex)
inv_error = np.seterr()
np.seterr(invalid='ignore')
np.seterr(divide='ignore')
t0 = time()
# Loop over Lattice points
for i, R_v in enumerate(R_Rv):
# Loop over r'. f_rr and V_rr are functions of r (dim. as r_vg[0])
if i == 1:
prnt(' Finished 1 cell in %s seconds' % int(time() - t0) +
' - estimated %s seconds left' %
int((len(R_Rv) - 1) * (time() - t0)),
file=self.fd)
self.fd.flush()
if len(R_Rv) > 5:
if (i+1) % (len(R_Rv) / 5 + 1) == 0:
prnt(' Finished %s cells in %s seconds'
% (i, int(time() - t0))
+ ' - estimated %s seconds left'
% int((len(R_Rv) - i) * (time() - t0) / i),
file=self.fd)
self.fd.flush()
for g in l_g_range:
rx = rx_g[g] + R_v[0]
ry = ry_g[g] + R_v[1]
rz = rz_g[g] + R_v[2]
# |r-r'-R_i|
rr = ((r_vg[0] - rx)**2 +
(r_vg[1] - ry)**2 +
(r_vg[2] - rz)**2)**0.5
n_av = (n_g + n_g.flatten()[g]) / 2.
fx_g = ns * self.get_fxc_g(n_av, index=g)
qc_g = (-4 * np.pi * ns / fx_g)**0.5
x = qc_g * rr
osc_x = np.sin(x) - x*np.cos(x)
f_rr = fx_g * osc_x / (2 * np.pi**2 * rr**3)
if nR > 1: # include only exchange part of the kernel here
V_rr = (sici(x)[0] * 2 / np.pi - 1) / rr
else: # include the full kernel (also hartree part)
V_rr = (sici(x)[0] * 2 / np.pi) / rr
# Terms with r = r'
if (np.abs(R_v) < 0.001).all():
tmp_flat = f_rr.flatten()
tmp_flat[g] = flocal_g.flatten()[g]
f_rr = tmp_flat.reshape(ng_c)
tmp_flat = V_rr.flatten()
tmp_flat[g] = Vlocal_g.flatten()[g]
V_rr = tmp_flat.reshape(ng_c)
del tmp_flat
f_rr[np.where(n_av < self.density_cut)] = 0.0
V_rr[np.where(n_av < self.density_cut)] = 0.0
f_rr *= weight_R[i]
V_rr *= weight_R[i]
# r-r'-R_i
r_r = np.array([r_vg[0] - rx, r_vg[1] - ry, r_vg[2] - rz])
# Fourier transform of r
for iq, q in enumerate(self.ibzq_qc):
q_v = np.dot(q, icell_cv)
e_q = np.exp(-1j * gemmdot(q_v, r_r, beta=0.0))
f_q = self.pd.fft((f_rr + V_rr) * e_q, iq) * vol / ng
fhxc_qsGr[iq][0, :, g - l_g_range[0]] += f_q
if ns == 2:
f_q = self.pd.fft(V_rr * e_q, iq) * vol / ng
fhxc_qsGr[iq][1, :, g - l_g_range[0]] += f_q
mpi.world.barrier()
np.seterr(**inv_error)
for iq, q in enumerate(self.ibzq_qc):
npw = len(self.pd.G2_qG[iq])
fhxc_sGsG = np.zeros((ns * npw, ns * npw), complex)
l_pw_size = -(-npw // mpi.world.size) # parallelize over PW below
l_pw_range = range(mpi.world.rank * l_pw_size,
min((mpi.world.rank + 1) * l_pw_size, npw))
if mpi.world.size > 1:
# redistribute grid and plane waves in fhxc_qsGr[iq]
bg1 = BlacsGrid(mpi.world, 1, mpi.world.size)
bg2 = BlacsGrid(mpi.world, mpi.world.size, 1)
bd1 = bg1.new_descriptor(npw, ng, npw, - (-ng / mpi.world.size))
bd2 = bg2.new_descriptor(npw, ng, -(-npw / mpi.world.size), ng)
fhxc_Glr = np.zeros((len(l_pw_range), ng), dtype=complex)
if ns == 2:
Koff_Glr = np.zeros((len(l_pw_range), ng), dtype=complex)
r = Redistributor(bg1.comm, bd1, bd2)
r.redistribute(fhxc_qsGr[iq][0], fhxc_Glr, npw, ng)
if ns == 2:
r.redistribute(fhxc_qsGr[iq][1], Koff_Glr, npw, ng)
else:
fhxc_Glr = fhxc_qsGr[iq][0]
if ns == 2:
Koff_Glr = fhxc_qsGr[iq][1]
# Fourier transform of r'
for iG in range(len(l_pw_range)):
f_g = fhxc_Glr[iG].reshape(ng_c)
f_G = self.pd.fft(f_g.conj(), iq) * vol / ng
fhxc_sGsG[l_pw_range[0] + iG, :npw] = f_G.conj()
if ns == 2:
v_g = Koff_Glr[iG].reshape(ng_c)
v_G = self.pd.fft(v_g.conj(), iq) * vol / ng
fhxc_sGsG[npw + l_pw_range[0] + iG, :npw] = v_G.conj()
if ns == 2: # f_00 = f_11 and f_01 = f_10
fhxc_sGsG[:npw, npw:] = fhxc_sGsG[npw:, :npw]
fhxc_sGsG[npw:, npw:] = fhxc_sGsG[:npw, :npw]
mpi.world.sum(fhxc_sGsG)
fhxc_sGsG /= vol
if mpi.rank == 0:
w = Writer('fhxc_%s_%s_%s_%s.gpw' %
(self.tag, self.xc, self.ecut, iq))
w.dimension('sG', ns * npw)
w.add('fhxc_sGsG', ('sG', 'sG'), dtype=complex)
if nR > 1: # add Hartree kernel evaluated in PW basis
Gq2_G = self.pd.G2_qG[iq]
if (q == 0).all():
Gq2_G[0] = 1.
vq_G = 4 * np.pi / Gq2_G
fhxc_sGsG += np.tile(np.eye(npw) * vq_G, (ns, ns))
w.fill(fhxc_sGsG)
w.close()
mpi.world.barrier()
prnt(file=self.fd)
def calculate_local_kernel(self):
# Standard ALDA exchange kernel
# Use with care. Results are very difficult to converge
# Sensitive to density_cut
ns = self.calc.wfs.nspins
gd = self.gd
pd = self.pd
cell_cv = gd.cell_cv
icell_cv = 2 * np.pi * np.linalg.inv(cell_cv)
vol = np.linalg.det(cell_cv)
fxc_sg = ns * self.get_fxc_g(ns * self.n_g)
fxc_sg[np.where(self.n_g < self.density_cut)] = 0.0
r_vg = gd.get_grid_point_coordinates()
for iq in range(len(self.ibzq_qc)):
Gvec_Gc = np.dot(pd.G_Qv[pd.Q_qG[iq]], cell_cv / (2 * np.pi))
npw = len(Gvec_Gc)
l_pw_size = -(-npw // mpi.world.size)
l_pw_range = range(mpi.world.rank * l_pw_size,
min((mpi.world.rank + 1) * l_pw_size, npw))
fhxc_sGsG = np.zeros((ns * npw, ns * npw), dtype=complex)
for s in range(ns):
for iG in l_pw_range:
for jG in range(npw):
fxc = fxc_sg[s].copy()
dG_c = Gvec_Gc[iG] - Gvec_Gc[jG]
dG_v = np.dot(dG_c, icell_cv)
dGr_g = gemmdot(dG_v, r_vg, beta=0.0)
ft_fxc = gd.integrate(np.exp(-1j * dGr_g) * fxc)
fhxc_sGsG[s * npw + iG, s * npw + jG] = ft_fxc
mpi.world.sum(fhxc_sGsG)
fhxc_sGsG /= vol
Gq2_G = self.pd.G2_qG[iq]
if (self.ibzq_qc[iq] == 0).all():
Gq2_G[0] = 1.
vq_G = 4 * np.pi / Gq2_G
fhxc_sGsG += np.tile(np.eye(npw) * vq_G, (ns, ns))
if mpi.rank == 0:
w = Writer('fhxc_%s_%s_%s_%s.gpw' %
(self.tag, self.xc, self.ecut, iq))
w.dimension('sG', ns * npw)
w.add('fhxc_sGsG', ('sG', 'sG'), dtype=complex)
w.fill(fhxc_sGsG)
w.close()
mpi.world.barrier()
prnt(file=self.fd)
def get_fxc_g(self, n_g, index=None):
if self.xc[-3:] == 'LDA':
return self.get_lda_g(n_g)
elif self.xc[-3:] == 'PBE':
return self.get_pbe_g(n_g, index=index)
else:
raise ValueError('%s kernel not recognized' % self.xc)
def get_lda_g(self, n_g):
return (4. / 9.) * self.A_x * n_g**(-2./3.)
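# Hedged numerical check (toy density value, not taken from a calculator): the
# ALDA kernel returned by get_lda_g() is the second density derivative of the
# LDA exchange energy density e_x(n) = A_x * n**(4/3), i.e. (4/9) * A_x * n**(-2/3).
def _check_lda_kernel(n0=0.05, h=1.0e-6):
    import numpy as np
    A_x = -(3 / 4.) * (3 / np.pi)**(1 / 3.)
    e_x = lambda n: A_x * n**(4 / 3.)
    fxc_fd = (e_x(n0 + h) - 2 * e_x(n0) + e_x(n0 - h)) / h**2
    fxc_analytic = (4. / 9.) * A_x * n0**(-2. / 3.)
    return fxc_fd, fxc_analytic  # the two values should agree closely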
def get_pbe_g(self, n_g, index=None):
if index is None:
gradn_vg = self.gradn_vg
else:
gradn_vg = self.calc.density.gd.empty(3)
for v in range(3):
gradn_vg[v] = (self.gradn_vg[v] +
self.gradn_vg[v].flatten()[index]) / 2
kf_g = (3. * np.pi**2 * n_g)**(1 / 3.)
s2_g = np.zeros_like(n_g)
for v in range(3):
axpy(1.0, gradn_vg[v]**2, s2_g)
s2_g /= 4 * kf_g**2 * n_g**2
e_g = self.A_x * n_g**(4 / 3.)
v_g = (4 / 3.) * e_g / n_g
f_g = (1 / 3.) * v_g / n_g
kappa = 0.804
mu = 0.2195149727645171
denom_g = (1 + mu * s2_g / kappa)
F_g = 1. + kappa - kappa / denom_g
Fn_g = -mu / denom_g**2 * 8 * s2_g / (3 * n_g)
Fnn_g = -11 * Fn_g / (3 * n_g) - 2 * Fn_g**2 / kappa
fxc_g = f_g * F_g
fxc_g += 2 * v_g * Fn_g
fxc_g += e_g * Fnn_g
# Contributions from varying the gradient
#Fgrad_vg = np.zeros_like(gradn_vg)
#Fngrad_vg = np.zeros_like(gradn_vg)
#for v in range(3):
# axpy(1.0, mu / den_g**2 * gradn_vg[v] / (2 * kf_g**2 * n_g**2),
# Fgrad_vg[v])
# axpy(-8.0, Fgrad_vg[v] / (3 * n_g), Fngrad_vg[v])
# axpy(-2.0, Fgrad_vg[v] * Fn_g / kappa, Fngrad_vg[v])
#tmp = np.zeros_like(fxc_g)
#tmp1 = np.zeros_like(fxc_g)
#for v in range(3):
#self.grad_v[v](Fgrad_vg[v], tmp)
#axpy(-2.0, tmp * v_g, fxc_g)
#for u in range(3):
#self.grad_v[u](Fgrad_vg[u] * tmp, tmp1)
#axpy(-4.0/kappa, tmp1 * e_g, fxc_g)
#self.grad_v[v](Fngrad_vg[v], tmp)
#axpy(-2.0, tmp * e_g, fxc_g)
#self.laplace(mu / den_g**2 / (2 * kf_g**2 * n_g**2), tmp)
#axpy(1.0, tmp * e_g, fxc_g)
return fxc_g
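# Hedged standalone sketch (illustrative s^2 values only): the PBE exchange
# enhancement factor entering get_pbe_g(),
# F(s^2) = 1 + kappa - kappa / (1 + mu * s^2 / kappa),
# equals 1 in the homogeneous limit s=0 and saturates at 1 + kappa for large
# reduced gradients, so fxc_g reduces to the ALDA kernel for slowly varying
# densities.
def _pbe_enhancement_example():
    import numpy as np
    kappa = 0.804
    mu = 0.2195149727645171
    s2 = np.array([0.0, 0.25, 1.0, 4.0, 100.0])
    F = 1. + kappa - kappa / (1 + mu * s2 / kappa)
    return s2, F  # F -> 1.0 at s2 = 0 and approaches 1.804 as s2 grows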
def get_fxc_libxc_g(self, n_g):
### NOT USED AT THE MOMENT
gd = self.calc.density.gd.refine()
xc = XC('GGA_X_' + self.xc[2:])
#xc = XC('LDA_X')
#sigma = np.zeros_like(n_g).flat[:]
xc.set_grid_descriptor(gd)
sigma_xg, gradn_svg = xc.calculate_sigma(np.array([n_g]))
dedsigma_xg = np.zeros_like(sigma_xg)
e_g = np.zeros_like(n_g)
v_sg = np.array([np.zeros_like(n_g)])
xc.calculate_gga(e_g, np.array([n_g]), v_sg, sigma_xg, dedsigma_xg)
sigma = sigma_xg[0].flat[:]
gradn_vg = gradn_svg[0]
dedsigma_g = dedsigma_xg[0]
libxc = LibXC('GGA_X_' + self.xc[2:])
#libxc = LibXC('LDA_X')
libxc.initialize(1)
libxc_fxc = libxc.xc.calculate_fxc_spinpaired
fxc_g = np.zeros_like(n_g).flat[:]
d2edndsigma_g = np.zeros_like(n_g).flat[:]
d2ed2sigma_g = np.zeros_like(n_g).flat[:]
libxc_fxc(n_g.flat[:], fxc_g, sigma, d2edndsigma_g, d2ed2sigma_g)
fxc_g = fxc_g.reshape(np.shape(n_g))
d2edndsigma_g = d2edndsigma_g.reshape(np.shape(n_g))
d2ed2sigma_g = d2ed2sigma_g.reshape(np.shape(n_g))
tmp = np.zeros_like(fxc_g)
tmp1 = np.zeros_like(fxc_g)
#for v in range(3):
#self.grad_v[v](d2edndsigma_g * gradn_vg[v], tmp)
#axpy(-4.0, tmp, fxc_g)
#for u in range(3):
#for v in range(3):
#self.grad_v[v](d2ed2sigma_g * gradn_vg[u] * gradn_vg[v], tmp)
#self.grad_v[u](tmp, tmp1)
#axpy(4.0, tmp1, fxc_g)
#self.laplace(dedsigma_g, tmp)
#axpy(2.0, tmp, fxc_g)
return fxc_g[::2, ::2, ::2]
def get_numerical_fxc_sg(self, n_sg):
### NOT USED AT THE MOMENT
gd = self.calc.density.gd.refine()
delta = 1.e-4
if self.xc[2:] == 'LDA':
xc = XC('LDA_X')
v1xc_sg = np.zeros_like(n_sg)
v2xc_sg = np.zeros_like(n_sg)
xc.calculate(gd, (1 + delta) * n_sg, v1xc_sg)
xc.calculate(gd, (1 - delta) * n_sg, v2xc_sg)
fxc_sg = (v1xc_sg - v2xc_sg) / (2 * delta * n_sg)
else:
fxc_sg = np.zeros_like(n_sg)
xc = XC('GGA_X_' + self.xc[2:])
vxc_sg = np.zeros_like(n_sg)
xc.calculate(gd, n_sg, vxc_sg)
for s in range(len(n_sg)):
for x in range(len(n_sg[0])):
for y in range(len(n_sg[0, 0])):
for z in range(len(n_sg[0, 0, 0])):
v1xc_sg = np.zeros_like(n_sg)
n1_sg = n_sg.copy()
n1_sg[s, x, y, z] *= (1 + delta)
xc.calculate(gd, n1_sg, v1xc_sg)
num = v1xc_sg[s, x, y, z] - vxc_sg[s, x, y, z]
den = delta * n_sg[s, x, y, z]
fxc_sg[s, x, y, z] = num / den
return fxc_sg[:, ::2, ::2, ::2]
|
robwarm/gpaw-symm
|
gpaw/xc/fxc.py
|
Python
|
gpl-3.0
| 24,057
|
[
"ASE",
"GPAW"
] |
e23130d1f3a624c32a10887b34eda93278a6ff3fff65b9e546a9146becd36fbb
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`presentationplugin` module provides the ability for OpenLP to display
presentations from a variety of document formats.
"""
import os
import logging
from PyQt4 import QtCore
from openlp.core.lib import Plugin, StringContent, build_icon, translate
from openlp.core.utils import AppLocation
from openlp.plugins.presentations.lib import PresentationController, \
PresentationMediaItem, PresentationTab
log = logging.getLogger(__name__)
__default_settings__ = {
u'presentations/override app': QtCore.Qt.Unchecked,
u'presentations/Impress': QtCore.Qt.Checked,
u'presentations/Powerpoint': QtCore.Qt.Checked,
u'presentations/Powerpoint Viewer': QtCore.Qt.Checked,
u'presentations/presentations files': []
}
class PresentationPlugin(Plugin):
"""
This plugin allows a presentation to be opened, controlled and displayed
on the output display. The plugin controls third party applications such
as OpenOffice.org Impress, Microsoft PowerPoint and the PowerPoint Viewer.
"""
log = logging.getLogger(u'PresentationPlugin')
def __init__(self):
"""
PresentationPlugin constructor.
"""
log.debug(u'Initialised')
self.controllers = {}
Plugin.__init__(self, u'presentations', __default_settings__, __default_settings__)
self.weight = -8
self.iconPath = u':/plugins/plugin_presentations.png'
self.icon = build_icon(self.iconPath)
def createSettingsTab(self, parent):
"""
Create the settings Tab
"""
visible_name = self.getString(StringContent.VisibleName)
self.settingsTab = PresentationTab(parent, self.name, visible_name[u'title'], self.controllers, self.iconPath)
def initialise(self):
"""
Initialise the plugin. Determine which controllers are enabled
and start their processes.
"""
log.info(u'Presentations Initialising')
Plugin.initialise(self)
for controller in self.controllers:
if self.controllers[controller].enabled():
try:
self.controllers[controller].start_process()
except Exception:
log.warn(u'Failed to start controller process')
self.controllers[controller].available = False
self.mediaItem.buildFileMaskString()
def finalise(self):
"""
Finalise the plugin. Ask all the enabled presentation applications
to close down their applications and release resources.
"""
log.info(u'Plugin Finalise')
# Ask each controller to tidy up.
for key in self.controllers:
controller = self.controllers[key]
if controller.enabled():
controller.kill()
Plugin.finalise(self)
def createMediaManagerItem(self):
"""
Create the Media Manager List
"""
self.mediaItem = PresentationMediaItem(
self.main_window.mediaDockManager.media_dock, self, self.icon, self.controllers)
def registerControllers(self, controller):
"""
Register each presentation controller (Impress, PPT etc) and
store for later use
"""
self.controllers[controller.name] = controller
def checkPreConditions(self):
"""
Check to see if we have any presentation software available.
If not, do not install the plugin.
"""
log.debug(u'checkPreConditions')
controller_dir = os.path.join(
AppLocation.get_directory(AppLocation.PluginsDir),
u'presentations', u'lib')
for filename in os.listdir(controller_dir):
if filename.endswith(u'controller.py') and not filename == 'presentationcontroller.py':
path = os.path.join(controller_dir, filename)
if os.path.isfile(path):
modulename = u'openlp.plugins.presentations.lib.' + os.path.splitext(filename)[0]
log.debug(u'Importing controller %s', modulename)
try:
__import__(modulename, globals(), locals(), [])
except ImportError:
log.warn(u'Failed to import %s on path %s',
modulename, path)
controller_classes = PresentationController.__subclasses__()
for controller_class in controller_classes:
controller = controller_class(self)
self.registerControllers(controller)
return bool(self.controllers)
def about(self):
"""
Return information about this plugin
"""
about_text = translate('PresentationPlugin', '<strong>Presentation '
'Plugin</strong><br />The presentation plugin provides the '
'ability to show presentations using a number of different '
'programs. The choice of available presentation programs is '
'available to the user in a drop down box.')
return about_text
def setPluginTextStrings(self):
"""
Called to define all translatable texts of the plugin
"""
## Name PluginList ##
self.textStrings[StringContent.Name] = {
u'singular': translate('PresentationPlugin', 'Presentation', 'name singular'),
u'plural': translate('PresentationPlugin', 'Presentations', 'name plural')
}
## Name for MediaDockManager, SettingsManager ##
self.textStrings[StringContent.VisibleName] = {
u'title': translate('PresentationPlugin', 'Presentations', 'container title')
}
# Middle Header Bar
tooltips = {
u'load': translate('PresentationPlugin', 'Load a new presentation.'),
u'import': u'',
u'new': u'',
u'edit': u'',
u'delete': translate('PresentationPlugin', 'Delete the selected presentation.'),
u'preview': translate('PresentationPlugin', 'Preview the selected presentation.'),
u'live': translate('PresentationPlugin', 'Send the selected presentation live.'),
u'service': translate('PresentationPlugin', 'Add the selected presentation to the service.')
}
self.setPluginUiTextStrings(tooltips)
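# Hedged standalone sketch (hypothetical base class, directory and package name;
# not OpenLP's real controllers): checkPreConditions() above relies on importing
# every "*controller.py" module so that PresentationController.__subclasses__()
# returns one class per available backend. The discovery pattern in isolation,
# reusing the module-level os import:
class _BaseController(object):
    pass

def _discover_controllers(controller_dir, package):
    for filename in os.listdir(controller_dir):
        if filename.endswith(u'controller.py') and filename != u'presentationcontroller.py':
            modulename = package + u'.' + os.path.splitext(filename)[0]
            try:
                __import__(modulename, globals(), locals(), [])
            except ImportError:
                pass  # a missing backend is simply skipped
    return _BaseController.__subclasses__()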
|
marmyshev/transitions
|
openlp/plugins/presentations/presentationplugin.py
|
Python
|
gpl-2.0
| 8,434
|
[
"Brian"
] |
15c6bb09fc1252cc569f2ac9a42cdb86590743c353bdb0850b504ef88af406b6
|
"""
Test refinement of beam, detector and crystal orientation parameters
using generated reflection positions from ideal geometry.
Control of the experimental model and choice of minimiser is done via
PHIL, which means we can do, for example:
cctbx.python tst_orientation_refinement.py \
"random_seed=3; engine=LBFGScurvs"
"""
from __future__ import annotations
import sys
def test(args=[]):
from math import pi
from cctbx.sgtbx import space_group, space_group_symbols
# Symmetry constrained parameterisation for the unit cell
from cctbx.uctbx import unit_cell
# We will set up a mock scan and a mock experiment list
from dxtbx.model import ScanFactory
from dxtbx.model.experiment_list import Experiment, ExperimentList
from libtbx.phil import parse
from libtbx.test_utils import approx_equal
from rstbx.symmetry.constraints.parameter_reduction import symmetrize_reduce_enlarge
from scitbx import matrix
from scitbx.array_family import flex
# Get modules to build models and minimiser using PHIL
import dials.tests.algorithms.refinement.setup_geometry as setup_geometry
import dials.tests.algorithms.refinement.setup_minimiser as setup_minimiser
from dials.algorithms.refinement.parameterisation.beam_parameters import (
BeamParameterisation,
)
from dials.algorithms.refinement.parameterisation.crystal_parameters import (
CrystalOrientationParameterisation,
CrystalUnitCellParameterisation,
)
# Model parameterisations
from dials.algorithms.refinement.parameterisation.detector_parameters import (
DetectorParameterisationSinglePanel,
)
# Parameterisation of the prediction equation
from dials.algorithms.refinement.parameterisation.prediction_parameters import (
XYPhiPredictionParameterisation,
)
from dials.algorithms.refinement.prediction.managed_predictors import (
ScansExperimentsPredictor,
ScansRayPredictor,
)
from dials.algorithms.refinement.reflection_manager import ReflectionManager
# Imports for the target function
from dials.algorithms.refinement.target import (
LeastSquaresPositionalResidualWithRmsdCutoff,
)
# Reflection prediction
from dials.algorithms.spot_prediction import IndexGenerator, ray_intersection
#############################
# Setup experimental models #
#############################
master_phil = parse(
"""
include scope dials.tests.algorithms.refinement.geometry_phil
include scope dials.tests.algorithms.refinement.minimiser_phil
""",
process_includes=True,
)
models = setup_geometry.Extract(master_phil, cmdline_args=args)
mydetector = models.detector
mygonio = models.goniometer
mycrystal = models.crystal
mybeam = models.beam
# Build a mock scan for a 180 degree sequence
sf = ScanFactory()
myscan = sf.make_scan(
image_range=(1, 1800),
exposure_times=0.1,
oscillation=(0, 0.1),
epochs=list(range(1800)),
deg=True,
)
sequence_range = myscan.get_oscillation_range(deg=False)
im_width = myscan.get_oscillation(deg=False)[1]
assert sequence_range == (0.0, pi)
assert approx_equal(im_width, 0.1 * pi / 180.0)
# Build an experiment list
experiments = ExperimentList()
experiments.append(
Experiment(
beam=mybeam,
detector=mydetector,
goniometer=mygonio,
scan=myscan,
crystal=mycrystal,
imageset=None,
)
)
###########################
# Parameterise the models #
###########################
det_param = DetectorParameterisationSinglePanel(mydetector)
s0_param = BeamParameterisation(mybeam, mygonio)
xlo_param = CrystalOrientationParameterisation(mycrystal)
xluc_param = CrystalUnitCellParameterisation(mycrystal)
# Fix beam to the X-Z plane (imgCIF geometry), fix wavelength
s0_param.set_fixed([True, False, True])
# Fix crystal parameters
# xluc_param.set_fixed([True, True, True, True, True, True])
########################################################################
# Link model parameterisations together into a parameterisation of the #
# prediction equation #
########################################################################
pred_param = XYPhiPredictionParameterisation(
experiments, [det_param], [s0_param], [xlo_param], [xluc_param]
)
################################
# Apply known parameter shifts #
################################
# shift detector by 1.0 mm each translation and 2 mrad each rotation
det_p_vals = det_param.get_param_vals()
p_vals = [a + b for a, b in zip(det_p_vals, [1.0, 1.0, 1.0, 2.0, 2.0, 2.0])]
det_param.set_param_vals(p_vals)
# shift beam by 2 mrad in free axis
s0_p_vals = s0_param.get_param_vals()
p_vals = list(s0_p_vals)
p_vals[0] += 2.0
s0_param.set_param_vals(p_vals)
# rotate crystal a bit (=2 mrad each rotation)
xlo_p_vals = xlo_param.get_param_vals()
p_vals = [a + b for a, b in zip(xlo_p_vals, [2.0, 2.0, 2.0])]
xlo_param.set_param_vals(p_vals)
# change unit cell a bit (=0.1 Angstrom length offsets, 0.1 degree of
# gamma angle)
xluc_p_vals = xluc_param.get_param_vals()
cell_params = mycrystal.get_unit_cell().parameters()
cell_params = [a + b for a, b in zip(cell_params, [0.1, 0.1, 0.1, 0.0, 0.0, 0.1])]
new_uc = unit_cell(cell_params)
newB = matrix.sqr(new_uc.fractionalization_matrix()).transpose()
S = symmetrize_reduce_enlarge(mycrystal.get_space_group())
S.set_orientation(orientation=newB)
X = tuple([e * 1.0e5 for e in S.forward_independent_parameters()])
xluc_param.set_param_vals(X)
#############################
# Generate some reflections #
#############################
print("Reflections will be generated with the following geometry:")
print(mybeam)
print(mydetector)
print(mycrystal)
print("Target values of parameters are")
msg = "Parameters: " + "%.5f " * len(pred_param)
print(msg % tuple(pred_param.get_param_vals()))
print()
# All indices in a 2.0 Angstrom sphere
resolution = 2.0
index_generator = IndexGenerator(
mycrystal.get_unit_cell(),
space_group(space_group_symbols(1).hall()).type(),
resolution,
)
indices = index_generator.to_array()
# Predict rays within the sequence range
ray_predictor = ScansRayPredictor(experiments, sequence_range)
obs_refs = ray_predictor(indices)
print("Total number of reflections excited", len(obs_refs))
# Take only those rays that intersect the detector
intersects = ray_intersection(mydetector, obs_refs)
obs_refs = obs_refs.select(intersects)
# Make a reflection predictor and re-predict for all these reflections. The
# result is the same, but we gain also the flags and xyzcal.px columns
ref_predictor = ScansExperimentsPredictor(experiments)
obs_refs["id"] = flex.int(len(obs_refs), 0)
obs_refs = ref_predictor(obs_refs)
# Set 'observed' centroids from the predicted ones
obs_refs["xyzobs.mm.value"] = obs_refs["xyzcal.mm"]
# Invent some variances for the centroid positions of the simulated data
im_width = 0.1 * pi / 180.0
px_size = mydetector[0].get_pixel_size()
var_x = flex.double(len(obs_refs), (px_size[0] / 2.0) ** 2)
var_y = flex.double(len(obs_refs), (px_size[1] / 2.0) ** 2)
var_phi = flex.double(len(obs_refs), (im_width / 2.0) ** 2)
obs_refs["xyzobs.mm.variance"] = flex.vec3_double(var_x, var_y, var_phi)
print("Total number of observations made", len(obs_refs))
###############################
# Undo known parameter shifts #
###############################
s0_param.set_param_vals(s0_p_vals)
det_param.set_param_vals(det_p_vals)
xlo_param.set_param_vals(xlo_p_vals)
xluc_param.set_param_vals(xluc_p_vals)
print("Initial values of parameters are")
msg = "Parameters: " + "%.5f " * len(pred_param)
print(msg % tuple(pred_param.get_param_vals()))
print()
#####################################
# Select reflections for refinement #
#####################################
refman = ReflectionManager(obs_refs, experiments)
##############################
# Set up the target function #
##############################
# The current 'achieved' criterion compares RMSD against 1/3 the pixel size and
# 1/3 the image width in radians. For the simulated data, these are just made up
mytarget = LeastSquaresPositionalResidualWithRmsdCutoff(
experiments, ref_predictor, refman, pred_param, restraints_parameterisation=None
)
################################
# Set up the refinement engine #
################################
refiner = setup_minimiser.Extract(
master_phil, mytarget, pred_param, cmdline_args=args
).refiner
print("Prior to refinement the experimental model is:")
print(mybeam)
print(mydetector)
print(mycrystal)
refiner.run()
print()
print("Refinement has completed with the following geometry:")
print(mybeam)
print(mydetector)
print(mycrystal)
if __name__ == "__main__":
test(sys.argv[1:])
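# Hedged usage note (the override values are the ones quoted in the module
# docstring and are purely illustrative): the same PHIL strings can be passed to
# test() directly, e.g.
# test(["random_seed=3; engine=LBFGScurvs"])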
|
dials/dials
|
tests/algorithms/refinement/test_orientation_refinement.py
|
Python
|
bsd-3-clause
| 9,452
|
[
"CRYSTAL"
] |
6b9f12889383f2aee1ac151b709500f8ba4a303d46d1387292cf3848edef7071
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.build.builder Contains the ModelBuilder class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
from collections import OrderedDict
# Import the relevant PTS classes and modules
from ...core.tools import filesystem as fs
from ...core.tools.utils import lazyproperty
from .tables import ModelsTable, RepresentationsTable, ModelMapsTable
from ...core.basics.map import Map
from ...core.basics.configuration import open_mapping
from ..basics.models import DeprojectionModel3D, load_3d_model
from ...core.tools.serialization import load_dict
from ...magic.core.frame import Frame
from ...magic.basics.coordinatesystem import CoordinateSystem
from .representation import Representation
from ...core.basics.log import log
from .construct import add_stellar_component, add_dust_component
from ...core.tools.utils import create_lazified_class
from ...core.tools import tables
# -----------------------------------------------------------------
model_map_basename = "map"
# -----------------------------------------------------------------
parameters_filename = "parameters.cfg"
deprojection_filename = "deprojection.mod"
model_map_filename = model_map_basename + ".fits"
model_filename = "model.mod"
properties_filename = "properties.dat"
# -----------------------------------------------------------------
models_name = "models"
representations_name = "representations"
# -----------------------------------------------------------------
models_table_filename = "models.dat"
maps_table_filename = "maps.dat"
representations_table_filename = "representations.dat"
# -----------------------------------------------------------------
class ModelSuite(object):
"""
This class ...
"""
def __init__(self, path):
"""
This function ...
:param path:
"""
# Set the build path
self.path = path
# Determine the path to the models directory
self.models_path = fs.create_directory_in(self.build_path, models_name)
# Determine the path to the models table
self.models_table_path = fs.join(self.models_path, models_table_filename)
# Initialize the models table if necessary
if not fs.is_file(self.models_table_path):
table = ModelsTable()
table.saveto(self.models_table_path)
# Determine the path to the maps table
self.maps_table_path = fs.join(self.models_path, maps_table_filename)
# Initialize the maps table if necessary
if not fs.is_file(self.maps_table_path):
table = ModelMapsTable()
table.saveto(self.maps_table_path)
# Determine the path to the representations directory
self.representations_path = fs.create_directory_in(self.build_path, representations_name)
# Determine the path to the representations table
self.representations_table_path = fs.join(self.representations_path, representations_table_filename)
# Initialize the representations table if necessary
if not fs.is_file(self.representations_table_path):
table = RepresentationsTable()
table.saveto(self.representations_table_path)
# -----------------------------------------------------------------
@classmethod
def from_modeling_path(cls, path):
"""
This function ...
:param path:
:return:
"""
return cls(fs.join(path, "build"))
# -----------------------------------------------------------------
@property
def build_path(self):
"""
This function ...
:return:
"""
return self.path
# -----------------------------------------------------------------
@property
def modeling_path(self):
"""
This function ...
:return:
"""
return fs.directory_of(self.path)
# -----------------------------------------------------------------
@lazyproperty
def galaxy_name(self):
"""
This function ...
:return:
"""
return fs.name(self.modeling_path)
# -----------------------------------------------------------------
@lazyproperty
def galaxy_properties_path(self):
"""
This function ...
:return:
"""
from ..core.environment import properties_name, data_name
return fs.join(self.modeling_path, data_name, properties_name)
# -----------------------------------------------------------------
@lazyproperty
def galaxy_properties(self):
"""
This function ...
:return:
"""
# Load the properties
from ..basics.properties import GalaxyProperties
return GalaxyProperties.from_file(self.galaxy_properties_path)
# -----------------------------------------------------------------
@property
def galaxy_distance(self):
"""
This function ...
:return:
"""
return self.galaxy_properties.distance
# -----------------------------------------------------------------
@property
def galaxy_center(self):
"""
This function ...
:return:
"""
return self.galaxy_properties.center
# -----------------------------------------------------------------
@property
def galaxy_info_path(self):
"""
This function ...
:return:
"""
# Set the path to the galaxy info file
modeling_data_path = fs.join(self.modeling_path, "data")
return fs.join(modeling_data_path, "info.dat")
# -----------------------------------------------------------------
@lazyproperty
def galaxy_info(self):
"""
This function ...
:return:
"""
# Load the info table
table = tables.from_file(self.galaxy_info_path)
# To ordered dict
info = OrderedDict()
for name in table.colnames: info[name] = table[name][0]
# Return the info
return info
# -----------------------------------------------------------------
@lazyproperty
def hubble_type(self):
"""
This function ...
:return:
"""
return self.galaxy_info["Hubble Type"]
# -----------------------------------------------------------------
@lazyproperty
def hubble_stage(self):
"""
This function ...
:return:
"""
return self.galaxy_info["Hubble Stage"]
# -----------------------------------------------------------------
def get_model_definition(self, model_name):
"""
This function ...
:param model_name:
:return:
"""
from .definition import ModelDefinition
# Determine model path
path = self.get_model_path(model_name)
if not fs.is_directory(path): raise ValueError("Model does not exist")
# Load the table
table = self.models_table
# Determine the stellar component paths
stellar_paths = table.stellar_component_paths_for_model(model_name)
# Determine the dust component paths
dust_paths = table.dust_component_paths_for_model(model_name)
# Create the model definition and return
return ModelDefinition(model_name, path, stellar_paths=stellar_paths, dust_paths=dust_paths)
# -----------------------------------------------------------------
def get_model(self, model_name, representation_name=None):
"""
This function ...
:param model_name:
:param representation_name:
:return:
"""
from ..core.model import RTModel
# Get the definition
definition = self.get_model_definition(model_name)
# Get the representation
if representation_name is not None: representation = self.get_model_representation(representation_name)
else: representation = None
# Get representation information
if representation is not None:
reference_wcs = representation.reference_map_wcs
else: reference_wcs = None
# Create the RTModel
return RTModel(definition, simulation_name=model_name, center=self.galaxy_center,
galaxy_name=self.galaxy_name, hubble_stage=self.hubble_stage, earth_wcs=reference_wcs)
# -----------------------------------------------------------------
@property
def models_table(self):
"""
This function ...
:return:
"""
# Open the table
return ModelsTable.from_file(self.models_table_path)
# -----------------------------------------------------------------
@property
def maps_table(self):
"""
This function ...
:return:
"""
return ModelMapsTable.from_file(self.maps_table_path)
# -----------------------------------------------------------------
def get_old_map_name_for_model(self, model_name):
"""
This function ...
:param model_name:
:return:
"""
return self.maps_table.old_stars_map_name_for_model(model_name)
# -----------------------------------------------------------------
def get_young_map_name_for_model(self, model_name):
"""
This function ...
:param model_name:
:return:
"""
return self.maps_table.young_stars_map_name_for_model(model_name)
# -----------------------------------------------------------------
def get_ionizing_map_name_for_model(self, model_name):
"""
This function ...
:param model_name:
:return:
"""
return self.maps_table.ionizing_stars_map_name_for_model(model_name)
# -----------------------------------------------------------------
def get_dust_map_name_for_model(self, model_name):
"""
This function ...
:param model_name:
:return:
"""
return self.maps_table.dust_map_name_for_model(model_name)
# -----------------------------------------------------------------
@property
def model_names(self):
"""
This function ...
:return:
"""
return self.models_table.names
# -----------------------------------------------------------------
@property
def nmodels(self):
"""
This function ...
:return:
"""
return len(self.model_names)
# -----------------------------------------------------------------
@property
def has_models(self):
"""
This function ...
:return:
"""
return self.nmodels > 0
# -----------------------------------------------------------------
@property
def no_models(self):
"""
This function ...
:return:
"""
return self.nmodels == 0
# -----------------------------------------------------------------
@property
def has_single_model(self):
"""
This function ...
:return:
"""
return self.nmodels == 1
# -----------------------------------------------------------------
@property
def single_model_name(self):
"""
This function ...
:return:
"""
return self.model_names[0]
# -----------------------------------------------------------------
def get_model_path(self, model_name):
"""
This function ...
:param model_name:
:return:
"""
return fs.join(self.models_path, model_name)
# -----------------------------------------------------------------
def get_model_stellar_path(self, model_name):
"""
This function ...
:param model_name:
:return:
"""
return fs.join(self.get_model_path(model_name), "stellar")
# -----------------------------------------------------------------
def get_model_dust_path(self, model_name):
"""
This function ...
:param model_name:
:return:
"""
return fs.join(self.get_model_path(model_name), "dust")
# -----------------------------------------------------------------
def get_stellar_component_paths(self, model_name):
"""
This function ...
:param model_name:
:return:
"""
return self.models_table.stellar_component_paths_for_model(model_name).values()
# -----------------------------------------------------------------
def get_stellar_component_names(self, model_name):
"""
This function ...
:return:
"""
# NO
#return fs.directories_in_path(self.get_model_stellar_path(model_name), returns="name")
# NEW
table = self.models_table
return table.stellar_component_names_for_model(model_name)
# -----------------------------------------------------------------
def get_dust_component_paths(self, model_name):
"""
This function ...
:param model_name:
:return:
"""
return self.models_table.dust_component_paths_for_model(model_name).values()
# -----------------------------------------------------------------
def get_dust_component_names(self, model_name):
"""
This function ...
:param model_name:
:return:
"""
# NO
#return fs.directories_in_path(self.get_model_dust_path(model_name), returns="name")
# NEW
table = self.models_table
return table.dust_component_names_for_model(model_name)
# -----------------------------------------------------------------
def is_representation(self, representation_name):
"""
This function ...
:param representation_name:
:return:
"""
path = self.get_representation_path(representation_name)
return fs.is_directory(path)
# -----------------------------------------------------------------
def get_model_name_for_representation(self, representation_name):
"""
This function ...
:param representation_name:
:return:
"""
if not self.is_representation(representation_name): raise ValueError("Representation does not exist")
# Get model name
return self.representations_table.model_for_representation(representation_name)
# -----------------------------------------------------------------
def get_representation(self, representation_name):
"""
This function ...
:param representation_name:
:return:
"""
path = self.get_representation_path(representation_name)
if not self.is_representation(representation_name): raise ValueError("Representation does not exist")
else:
model_name = self.get_model_name_for_representation(representation_name)
return Representation(representation_name, model_name, path)
# -----------------------------------------------------------------
def get_model_representation(self, representation_name):
"""
This function ...
:param representation_name:
:return:
"""
return self.get_representation(representation_name)
# -----------------------------------------------------------------
def get_representation_path(self, representation_name):
"""
This function ...
:param representation_name:
:return:
"""
return fs.join(self.representations_path, representation_name)
# -----------------------------------------------------------------
@property
def representations_table(self):
"""
This function ...
:return:
"""
return RepresentationsTable.from_file(self.representations_table_path)
# -----------------------------------------------------------------
@property
def representation_names(self):
"""
This function ...
:return:
"""
return self.representations_table.names
# -----------------------------------------------------------------
@property
def nrepresentations(self):
"""
This function ...
:return:
"""
return len(self.representation_names)
# -----------------------------------------------------------------
@property
def has_representations(self):
"""
This function ...
:return:
"""
return self.nrepresentations > 0
# -----------------------------------------------------------------
@property
def no_representations(self):
"""
This function ...
:return:
"""
return self.nrepresentations == 0
# -----------------------------------------------------------------
@property
def has_single_representation(self):
"""
This function ...
:return:
"""
return self.nrepresentations == 1
# -----------------------------------------------------------------
@property
def single_representation_name(self):
"""
This function ...
:return:
"""
return self.representation_names[0]
# -----------------------------------------------------------------
def representations_for_model(self, model_name):
"""
This function ...
:param model_name:
:return:
"""
return self.representations_table.representations_for_model(model_name)
# -----------------------------------------------------------------
def create_deprojection_for_wcs(self, galaxy_properties, disk_position_angle, wcs, filename, scaleheight):
"""
This function ...
:param galaxy_properties: has to be passed since this class is GENERAL (BUT THIS FUNCTION CAN ONLY BE CALLED FOR A GALAXY MODELING ENVIRONMENT)
:param disk_position_angle:
:param wcs:
:param filename:
:param scaleheight:
:return:
"""
# Create the deprojection
return create_deprojection_for_wcs(galaxy_properties, disk_position_angle, wcs, filename, scaleheight)
# -----------------------------------------------------------------
def create_deprojection_for_map(self, galaxy_properties, disk_position_angle, map, filename, scaleheight, inclination=None):
"""
This function ...
:param galaxy_properties:
:param disk_position_angle:
:param map:
:param filename:
:param scaleheight:
:param inclination:
:return:
"""
# Create the deprojection
return create_deprojection_for_map(galaxy_properties, disk_position_angle, map, filename, scaleheight, inclination=inclination)
# -----------------------------------------------------------------
def load_component(self, path, add_map=False):
"""
This function ...
:param path:
:param add_map:
:return:
"""
return load_component(path, add_map=add_map)
# -----------------------------------------------------------------
def load_component_map(self, path):
"""
This function ...
:param path:
:return:
"""
return load_component_map(path)
# -----------------------------------------------------------------
def load_component_map_wcs(self, path):
"""
This function ...
:param path:
:return:
"""
return load_component_map_wcs(path)
# -----------------------------------------------------------------
def get_stellar_component_path(self, model_name, component_name):
"""
This function ...
:param model_name:
:param component_name:
:return:
"""
# NO
#return fs.join(self.get_model_stellar_path(model_name), component_name)
# NEW
return self.models_table.stellar_component_path_for_name(model_name, component_name)
# -----------------------------------------------------------------
def get_dust_component_path(self, model_name, component_name):
"""
This function ...
:param model_name:
:param component_name:
:return:
"""
# NO
#return fs.join(self.get_model_dust_path(model_name), component_name)
# NEW
return self.models_table.dust_component_path_for_name(model_name, component_name)
# -----------------------------------------------------------------
def load_stellar_component(self, model_name, component_name, add_map=False):
"""
This function ...
:param model_name:
:param component_name:
:param add_map:
:return:
"""
# Determine the path
path = self.get_stellar_component_path(model_name, component_name)
# Load the component
return self.load_component(path, add_map=add_map)
# -----------------------------------------------------------------
def load_stellar_components(self, model_name, add_map=False):
"""
This function ...
:param model_name:
:param add_map:
:return:
"""
# Loop over the paths
for path in self.get_stellar_component_paths(model_name):
# Give the component
yield self.load_component(path, add_map=add_map)
# -----------------------------------------------------------------
def load_stellar_component_deprojection(self, model_name, component_name, load_map=False):
"""
This function ...
:param model_name:
:param component_name:
:param load_map:
:return:
"""
from ..component.galaxy import get_disk_position_angle
# Load galaxy properties
from ..component.galaxy import get_galaxy_properties
properties = get_galaxy_properties(self.modeling_path)
# Load component
component = self.load_stellar_component(model_name, component_name, add_map=load_map)
# Get the map
if "map" in component: the_map = component.map
else: the_map = None
## Set deprojection
if "deprojection" in component:
# Get title
title = component.parameters.title
# Return
deprojection = component.deprojection
if the_map is not None: deprojection.map = the_map
return title, deprojection
# Check if this is a new component, add geometry, SED and normalization all at once
if "geometry" in component.parameters:
# Get title
title = component.parameters.title
# Check whether this is a read FITS geometry
geometry_type = component.parameters.geometry
if geometry_type != "ReadFitsGeometry": return component.parameters.title, None
# Get properties for each of the three classes
geometry_properties = component.properties["geometry"]
# Get the path of the input map
filepath = geometry_properties["filename"]
# Get the scale height
scale_height = geometry_properties["axialScale"]
# Get properties
wcs = CoordinateSystem.from_file(filepath)
# Get the galaxy distance, the inclination and position angle
distance = properties.distance
inclination = properties.inclination
position_angle = get_disk_position_angle(self.modeling_path)
# Get center coordinate of galaxy
galaxy_center = properties.center
# Create
deprojection = DeprojectionModel3D.from_wcs(wcs, galaxy_center, distance, position_angle, inclination,
filepath, scale_height)
# Set the map
if the_map is not None: deprojection.map = the_map
# Return
return title, deprojection
# No deprojection
return component.parameters.title, None
# -----------------------------------------------------------------
def get_stellar_component_map_path(self, model_name, component_name):
"""
This function ...
:param model_name:
:param component_name:
:return:
"""
# Determine the path
path = self.get_stellar_component_path(model_name, component_name)
# Return the map path
return get_component_map_path(path)
# -----------------------------------------------------------------
def load_stellar_component_map(self, model_name, component_name):
"""
This function ...
:param model_name:
:param component_name:
:return:
"""
# Determine the path
path = self.get_stellar_component_path(model_name, component_name)
# Load the map
return self.load_component_map(path)
# -----------------------------------------------------------------
def load_stellar_component_wcs(self, model_name, component_name):
"""
This function ...
:param model_name:
:param component_name:
:return:
"""
# Determine the path
path = self.get_stellar_component_path(model_name, component_name)
# Load the coordinate system
return self.load_component_map_wcs(path)
# -----------------------------------------------------------------
def load_dust_component(self, model_name, component_name, add_map=False):
"""
This function ...
:param model_name:
:param component_name:
:param add_map:
:return:
"""
# Determine the path
path = self.get_dust_component_path(model_name, component_name)
# Load the component
return self.load_component(path, add_map=add_map)
# -----------------------------------------------------------------
def load_dust_components(self, model_name, add_map=False):
"""
This function ...
:param model_name:
:param add_map:
:return:
"""
# Loop over the paths
for path in self.get_dust_component_paths(model_name):
# Give the component
yield self.load_component(path, add_map=add_map)
# -----------------------------------------------------------------
def load_dust_component_deprojection(self, model_name, component_name, load_map=False):
"""
This function ...
:param model_name:
:param component_name:
:param load_map:
"""
from ..component.galaxy import get_disk_position_angle
# Load galaxy properties
from ..component.galaxy import get_galaxy_properties
properties = get_galaxy_properties(self.modeling_path)
# Load the component
component = self.load_dust_component(model_name, component_name, add_map=load_map)
# Get the map
if "map" in component: the_map = component.map
else: the_map = None
# Set deprojection
if "deprojection" in component:
# Get title
title = component.parameters.title
# Return
deprojection = component.deprojection
if the_map is not None: deprojection.map = the_map
return title, deprojection
# Check if this is a new dust component, add geometry, mix and normalization all at once
if "geometry" in component.parameters:
# Get title
title = component.parameters.title
# Check whether this is a read FITS geometry
geometry_type = component.parameters.geometry
if geometry_type != "ReadFitsGeometry": return title, None
# Get properties for each of the three classes
geometry_properties = component.properties["geometry"]
# Get the path of the input map
filepath = geometry_properties["filename"]
# Get the scale height
scale_height = geometry_properties["axialScale"]
# Get properties
wcs = CoordinateSystem.from_file(filepath)
# Get the galaxy distance, the inclination and position angle
distance = properties.distance
inclination = properties.inclination
position_angle = get_disk_position_angle(self.modeling_path)
# Get center coordinate of galaxy
galaxy_center = properties.center
# Create
deprojection = DeprojectionModel3D.from_wcs(wcs, galaxy_center, distance, position_angle, inclination,
filepath, scale_height)
# Set the map
if the_map is not None: deprojection.map = the_map
# Return
return title, deprojection
# No deprojection for this component
return component.parameters.title, None
# -----------------------------------------------------------------
def get_dust_component_map_path(self, model_name, component_name):
"""
This function ...
:param model_name:
:param component_name:
:return:
"""
# Determine the path
path = self.get_dust_component_path(model_name, component_name)
# Return the map path
return get_component_map_path(path)
# -----------------------------------------------------------------
def load_dust_component_map(self, model_name, component_name):
"""
This function ...
:param model_name:
:param component_name:
:return:
"""
# Determine the path
path = self.get_dust_component_path(model_name, component_name)
# Load the map
return self.load_component_map(path)
# -----------------------------------------------------------------
def load_dust_component_wcs(self, model_name, component_name):
"""
This function ...
:param model_name:
:param component_name:
:return:
"""
# Determine the path
path = self.get_dust_component_path(model_name, component_name)
# Load the WCS
return self.load_component_map_wcs(path)
# -----------------------------------------------------------------
def add_model_components(self, model_name, ski, input_map_paths):
"""
This function ...
:param model_name:
:param ski:
:param input_map_paths:
:return:
"""
# Inform the user
log.info("Adding the components of model '" + model_name + "' to the ski file ...")
# 1. Set stellar components
self.add_stellar_components(model_name, ski, input_map_paths)
# 2. Set dust components
self.add_dust_components(model_name, ski, input_map_paths)
# -----------------------------------------------------------------
def add_stellar_components(self, model_name, ski, input_map_paths):
"""
This function ...
:param model_name:
:param ski
:param input_map_paths:
:return:
"""
from .models.stars import titles as stellar_titles
# Inform the user
log.info("Adding the stellar components of model '" + model_name + "' to the ski file ...")
# Loop over the stellar components
#for name in self.get_stellar_component_names(model_name): # SLOWER BECAUSE MODELS TABLE WILL BE READ MULTIPLE TIMES
for component in self.load_stellar_components(model_name, add_map=False):
# Debugging
log.debug("Adding the '" + component.name + "' stellar component ...")
# Load the component
#component = self.load_stellar_component(model_name, name, add_map=False) # SLOWER BECAUSE ...
# Try to get the title
name = component.name
title = stellar_titles[name] if name in stellar_titles else None
# Debugging
if title is not None: log.debug("Adding the component under the title '" + title + "' ...")
# Add the stellar component
map_filename = add_stellar_component(ski, name, component, title=title)
# If map filename is defined, set path in dictionary
if map_filename is not None: input_map_paths[map_filename] = component.map_path
# -----------------------------------------------------------------
def add_dust_components(self, model_name, ski, input_map_paths):
"""
This function ...
:param model_name:
:param ski:
:param input_map_paths:
:return:
"""
from .models.dust import titles as dust_titles
# Inform the user
log.info("Adding the dust components of model '" + model_name + "' to the ski file ...")
# Loop over the dust components
#for name in self.get_dust_component_names(model_name): # SLOWER BECAUSE MODELS TABLE WILL BE READ MULTIPLE TIMES
for component in self.load_dust_components(model_name, add_map=False):
# Debugging
log.debug("Adding the '" + component.name + "' dust component ...")
# Load the component
#component = self.load_dust_component(model_name, name, add_map=False) # SLOWER BECAUSE ...
# Try to get the title
name = component.name
title = dust_titles[name] if name in dust_titles else None
# Debugging
if title is not None: log.debug("Adding the component under the title '" + title + "' ...")
# Add the dust component
map_filename = add_dust_component(ski, name, component, title=title)
# If map filename is defined, set path in dictionary
if map_filename is not None: input_map_paths[map_filename] = component.map_path
# -----------------------------------------------------------------
StaticModelSuite = create_lazified_class(ModelSuite, "StaticModelSuite")
# -----------------------------------------------------------------
def create_deprojection_for_wcs(galaxy_properties, disk_position_angle, wcs, filename, scaleheight, inclination=None):
"""
This function ...
:param galaxy_properties:
:param disk_position_angle:
:param wcs:
:param filename:
:param scaleheight:
:param inclination:
:return:
"""
# Get the galaxy distance, the inclination and position angle
distance = galaxy_properties.distance
if inclination is None: inclination = galaxy_properties.inclination
position_angle = disk_position_angle
# Get center coordinate of galaxy
galaxy_center = galaxy_properties.center
# Create deprojection
# wcs, galaxy_center, distance, pa, inclination, filepath, scale_height
deprojection = DeprojectionModel3D.from_wcs(wcs, galaxy_center, distance, position_angle, inclination, filename, scaleheight)
# Return the deprojection
return deprojection
# -----------------------------------------------------------------
def create_deprojection_for_map(galaxy_properties, disk_position_angle, map, filename, scaleheight, inclination=None):
"""
This function ...
:param galaxy_properties:
:param disk_position_angle:
:param map:
:param filename:
:param scaleheight:
:param inclination:
:return:
"""
# Get the WCS
reference_wcs = map.wcs
# Create the deprojection
return create_deprojection_for_wcs(galaxy_properties, disk_position_angle, reference_wcs, filename, scaleheight, inclination=inclination)
# -----------------------------------------------------------------
def load_component_cwd(add_map=False):
"""
This function ...
:param add_map:
:return:
"""
return load_component(fs.cwd(), add_map=add_map)
# -----------------------------------------------------------------
def load_component(path, add_map=False):
"""
This function ...
:param path:
:param add_map:
:return:
"""
# Create a map
component = Map()
# Set the name
component.name = fs.name(path)
# Load the parameters
parameters_path = fs.join(path, parameters_filename)
if fs.is_file(parameters_path):
parameters = open_mapping(parameters_path)
component.parameters = parameters
component.parameters_path = parameters_path
# Load the deprojection
deprojection_path = fs.join(path, deprojection_filename)
if fs.is_file(deprojection_path):
deprojection = DeprojectionModel3D.from_file(deprojection_path)
component.deprojection = deprojection
# Load the map
map_path = fs.join(path, model_map_filename)
if fs.is_file(map_path):
component.map_path = map_path
if add_map:
map = Frame.from_file(map_path)
component.map = map
# Load the model
model_path = fs.join(path, model_filename)
if fs.is_file(model_path):
model = load_3d_model(model_path)
component.model = model
# Load the properties
properties_path = fs.join(path, properties_filename)
if fs.is_file(properties_path):
properties = load_dict(properties_path)
component.properties = properties
# Return the component
return component
# -----------------------------------------------------------------
def get_component_map_path(path):
"""
Determine the path to the map of the component in the given directory.
:param path:
:return:
"""
# Determine map path
map_path = fs.join(path, model_map_filename)
# Check
if not fs.is_file(map_path): raise IOError("The component map '" + map_path + "' does not exist")
# Return the map path
return map_path
# -----------------------------------------------------------------
def load_component_map(path):
"""
Load the map of the component in the given directory.
:param path:
:return:
"""
# Get the path
map_path = get_component_map_path(path)
# Load the map and return
return Frame.from_file(map_path)
# -----------------------------------------------------------------
def load_component_map_wcs(path):
"""
Load the coordinate system of the component map in the given directory.
:param path:
:return:
"""
# Get the path
map_path = get_component_map_path(path)
# Load the map and return
return CoordinateSystem.from_file(map_path)
# -----------------------------------------------------------------
def load_stellar_deprojections(modeling_path, model_name):
"""
Load the deprojections of the stellar components of the specified model.
:param modeling_path:
:param model_name:
:return:
"""
# Load the model suite
suite = ModelSuite.from_modeling_path(modeling_path)
# Return the deprojections
return get_stellar_deprojections(suite, model_name)
# -----------------------------------------------------------------
def get_stellar_deprojections(suite, model_name, deprojections=None, add_title=False):
"""
Get the deprojections of the stellar components of the specified model.
:param suite:
:param model_name:
:param deprojections:
:param add_title:
:return:
"""
# Initialize dictionary
return_deprojections = True
if deprojections is None: deprojections = dict()
else: return_deprojections = False
# Loop over the stellar components
for name in suite.get_stellar_component_names(model_name):
# Load the deprojection of the component, if applicable
title, deprojection = suite.load_stellar_component_deprojection(model_name, name)
if deprojection is not None:
if add_title: deprojections[(name, title)] = deprojection
else: deprojections[name] = deprojection
# Return the deprojections
if return_deprojections: return deprojections
# -----------------------------------------------------------------
def load_dust_deprojections(modeling_path, model_name):
"""
Load the deprojections of the dust components of the specified model.
:param modeling_path:
:param model_name:
:return:
"""
# Load the model suite
suite = ModelSuite.from_modeling_path(modeling_path)
# Return the deprojections
return get_dust_deprojections(suite, model_name)
# -----------------------------------------------------------------
def get_dust_deprojections(suite, model_name, deprojections=None, add_title=False):
"""
Get the deprojections of the dust components of the specified model.
:param suite:
:param model_name:
:param deprojections:
:param add_title:
:return:
"""
# Initialize dictionary
return_deprojections = True
if deprojections is None: deprojections = dict()
else: return_deprojections = False
# Loop over the dust components
for name in suite.get_dust_component_names(model_name):
# Load the deprojection of the component, if applicable
title, deprojection = suite.load_dust_component_deprojection(model_name, name)
if deprojection is not None:
if add_title: deprojections[(name, title)] = deprojection
else: deprojections[name] = deprojection
# Return the deprojections
if return_deprojections: return deprojections
# -----------------------------------------------------------------
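# Illustrative sketch only (not part of the original suite module): the
# 'deprojections' keyword of the two helpers above lets stellar and dust
# deprojections be collected into one shared dictionary; when a dictionary is
# passed in, the helpers return nothing and fill the caller's dictionary.
def example_collect_deprojections(suite, model_name):
    deprojections = dict()
    get_stellar_deprojections(suite, model_name, deprojections=deprojections, add_title=True)
    get_dust_deprojections(suite, model_name, deprojections=deprojections, add_title=True)
    return deprojections
# -----------------------------------------------------------------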
|
SKIRT/PTS
|
modeling/build/suite.py
|
Python
|
agpl-3.0
| 42,102
|
[
"Galaxy"
] |
d36267e0b44b709ad4050cd3d41ae2aced1270fd13d93750c21e13acd6c2c325
|
# Copyright Yair Benita Y.Benita@pharm.uu.nl
# Biopython (http://biopython.org) license applies
"""Calculate isoelectric points of polypeptides using methods of Bjellqvist.
pK values and the methods are taken from::
* Bjellqvist, B.,Hughes, G.J., Pasquali, Ch., Paquet, N., Ravier, F., Sanchez,
J.-Ch., Frutiger, S. & Hochstrasser, D.F.
The focusing positions of polypeptides in immobilized pH gradients can be predicted
from their amino acid sequences. Electrophoresis 1993, 14, 1023-1031.
* Bjellqvist, B., Basse, B., Olsen, E. and Celis, J.E.
Reference points for comparisons of two-dimensional maps of proteins from
different human cell types defined in a pH scale where isoelectric points correlate
with polypeptide compositions. Electrophoresis 1994, 15, 529-539.
I designed the algorithm according to a note by David L. Tabb, available at:
http://fields.scripps.edu/DTASelect/20010710-pI-Algorithm.pdf
"""
__docformat__ = "restructuredtext en"
positive_pKs = {'Nterm': 7.5, 'K': 10.0, 'R': 12.0, 'H': 5.98}
negative_pKs = {'Cterm': 3.55, 'D': 4.05, 'E': 4.45, 'C': 9.0, 'Y': 10.0}
pKcterminal = {'D': 4.55, 'E': 4.75}
pKnterminal = {'A': 7.59, 'M': 7.0, 'S': 6.93, 'P': 8.36, 'T': 6.82, 'V': 7.44, 'E': 7.7}
charged_aas = ('K', 'R', 'H', 'D', 'E', 'C', 'Y')
# access this module through ProtParam.ProteinAnalysis class.
# first make a ProteinAnalysis object and then call its isoelectric_point method.
class IsoelectricPoint(object):
def __init__(self, ProteinSequence, AminoAcidsContent):
self.sequence = ProteinSequence
self.charged_aas_content = self._select_charged(AminoAcidsContent)
# This function creates a dictionary with the contents of each charged aa,
# plus Cterm and Nterm.
def _select_charged(self, AminoAcidsContent):
charged = {}
for aa in charged_aas:
charged[aa] = float(AminoAcidsContent[aa])
charged['Nterm'] = 1.0
charged['Cterm'] = 1.0
return charged
# This function calculates the total charge of the protein at a given pH.
def _chargeR(self, pH, pos_pKs, neg_pKs):
PositiveCharge = 0.0
for aa, pK in pos_pKs.items():
CR = 10 ** (pK - pH)
partial_charge = CR / (CR + 1.0)
PositiveCharge += self.charged_aas_content[aa] * partial_charge
NegativeCharge = 0.0
for aa, pK in neg_pKs.items():
CR = 10 ** (pH - pK)
partial_charge = CR / (CR + 1.0)
NegativeCharge += self.charged_aas_content[aa] * partial_charge
return PositiveCharge - NegativeCharge
# This is the action function; it varies the pH until the net charge of the protein is 0 (or close to it).
def pi(self):
pos_pKs = dict(positive_pKs)
neg_pKs = dict(negative_pKs)
nterm = self.sequence[0]
cterm = self.sequence[-1]
if nterm in pKnterminal:
pos_pKs['Nterm'] = pKnterminal[nterm]
if cterm in pKcterminal:
neg_pKs['Cterm'] = pKcterminal[cterm]
# Bracket between pH1 and pH2
pH = 7.0
Charge = self._chargeR(pH, pos_pKs, neg_pKs)
if Charge > 0.0:
pH1 = pH
Charge1 = Charge
while Charge1 > 0.0:
pH = pH1 + 1.0
Charge = self._chargeR(pH, pos_pKs, neg_pKs)
if Charge > 0.0:
pH1 = pH
Charge1 = Charge
else:
pH2 = pH
Charge2 = Charge
break
else:
pH2 = pH
Charge2 = Charge
while Charge2 < 0.0:
pH = pH2 - 1.0
Charge = self._chargeR(pH, pos_pKs, neg_pKs)
if Charge < 0.0:
pH2 = pH
Charge2 = Charge
else:
pH1 = pH
Charge1 = Charge
break
# Bisection
while pH2 - pH1 > 0.0001 and Charge != 0.0:
pH = (pH1 + pH2) / 2.0
Charge = self._chargeR(pH, pos_pKs, neg_pKs)
if Charge > 0.0:
pH1 = pH
Charge1 = Charge
else:
pH2 = pH
Charge2 = Charge
return pH
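if __name__ == "__main__":
    # Usage sketch only (not part of the original module): as the note above
    # explains, this class is normally reached through ProtParam.ProteinAnalysis,
    # which counts the amino acids and then delegates to IsoelectricPoint.pi().
    # The sequence below is an arbitrary example.
    from Bio.SeqUtils.ProtParam import ProteinAnalysis
    analysis = ProteinAnalysis("MKWVTFISLLLLFSSAYS")
    print(analysis.isoelectric_point())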
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/SeqUtils/IsoelectricPoint.py
|
Python
|
apache-2.0
| 4,328
|
[
"Biopython"
] |
2f3052427889d261dd2eb5af0e60258e6efe324f52608639cdf0fe3aa8eb9a16
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from openbci.offline_analysis.obci_signal_processing import read_manager
import analysis_offline
class ReadSignal(object):
def __init__(self, files):
self.files = files
def process(self, files=None):
if self.files is not None:
files = self.files
mgrs = []
for f in files:
mgrs.append(read_manager.ReadManager(f['info'],
f['data'],
f['tags']))
return mgrs
def __repr__(self):
return str({"CLASS": self.__class__.__name__,
"FILES":str(self.files)})
class ExcludeChannels(object):
def __init__(self, channels):
self.channels = channels
def process(self, mgrs):
new_mgrs = []
for i_mgr in mgrs:
new_mgrs.append(analysis_offline.exclude_channels(i_mgr, self.channels))
return new_mgrs
def __repr__(self):
return str({"CLASS": self.__class__.__name__,
"CHANNELS": str(self.channels)})
class LeaveChannels(object):
def __init__(self, channels):
self.channels = channels
def process(self, mgrs):
new_mgrs = []
for i_mgr in mgrs:
new_mgrs.append(analysis_offline.leave_channels(i_mgr, self.channels))
return new_mgrs
def __repr__(self):
return str({"CLASS": self.__class__.__name__,
"CHANNELS": str(self.channels)})
class Montage(object):
def __init__(self, montage_type, **montage_params):
self.montage_type = montage_type
self.montage_params = montage_params
def process(self, mgrs):
new_mgrs = []
for i_mgr in mgrs:
new_mgrs.append(analysis_offline.montage(i_mgr, self.montage_type, **self.montage_params))
return new_mgrs
def __repr__(self):
return str({"CLASS": self.__class__.__name__,
"montage_type": self.montage_type,
"montage_params": self.montage_params})
class Normalize(object):
def __init__(self, norm=2):
self.norm = norm
def process(self, mgrs):
new_mgrs = []
for i_mgr in mgrs:
new_mgrs.append(analysis_offline.normalize(i_mgr, self.norm))
return new_mgrs
def __repr__(self):
return str({"CLASS": self.__class__.__name__,
"norm": self.norm})
class PrepareTrainSet(object):
def __init__(self, **whatever):
pass
def process(self, mgrs):
return analysis_offline.prepare_train_set(mgrs)
def __repr__(self):
return str({"CLASS": self.__class__.__name__})
class SVM(object):
def __init__(self, C, Cmode , kernel, folds=5, ret='balancedSuccessRate'):
self.C = C
self.Cmode = Cmode
self.kernel = kernel
self.folds = folds
self.ret = ret
def process(self, train_data):
return analysis_offline.svm(train_data,
self.C, self.Cmode, self.kernel, self.folds, self.ret)
def __repr__(self):
d = {"CLASS": self.__class__.__name__,
"Cmode":self.Cmode,
"folds":str(self.folds),
"s.C":str(self.C)}
k_class = self.kernel.__class__.__name__
if k_class == 'Polynomial':
k_class = k_class+"("+str(self.kernel.degree)+")"
elif k_class == 'Gaussian':
k_class = k_class+"("+str(self.kernel.gamma)+")"
d['kernel'] = k_class
return str(d)
class P300SVM(object):
def __init__(self, C, Cmode, kernel):
self.C = C
self.Cmode = Cmode
self.kernel = kernel
def process(self, mgrs):
return analysis_offline.p300_svm(mgrs,
self.C, self.Cmode, self.kernel)
def __repr__(self):
return str({"CLASS": self.__class__.__name__,
"Cmode":self.Cmode,
"kernel": self.kernel.__class__.__name__,
"s.C":str(self.C)})
class Segment(object):
def __init__(self, classes, start_offset, duration):
self.classes = classes
self.start_offset = start_offset
self.duration = duration
def process(self, mgrs):
new_mgrs = []
for mgr in mgrs:
new_mgrs = new_mgrs + analysis_offline.segment(mgr, self.classes, self.start_offset, self.duration)
return new_mgrs
def __repr__(self):
return str({"CLASS": self.__class__.__name__,
"classes": self.classes,
"start_offset": self.start_offset,
"duration": self.duration})
class Average(object):
def __init__(self, bin_selectors, bin_names, size, baseline, strategy):
self.bin_selectors = bin_selectors
self.bin_names = bin_names
self.size = size
self.baseline = baseline
self.strategy = strategy
def process(self, mgrs):
return analysis_offline.average(mgrs, self.bin_selectors, self.bin_names,
self.size, self.baseline, self.strategy)
def __repr__(self):
return str({"CLASS": self.__class__.__name__,
"bin_selectors": str(self.bin_selectors),
"bin_names": self.bin_names,
"size": self.size,
"baseline": self.baseline,
"strategy": self.strategy
})
class Filter(object):
def __init__(self, wp, ws, gpass, gstop, analog=0, ftype='ellip', output='ba', unit='radians', use_filtfilt=False):
self.wp = wp
self.ws = ws
self.gpass = gpass
self.gstop = gstop
self.analog = analog
self.ftype = ftype
self.output = output
self.unit = unit
self.use_filtfilt = use_filtfilt
def process(self, mgrs):
new_mgrs = []
for mgr in mgrs:
new_mgrs.append(analysis_offline.filter(mgr, self.wp, self.ws, self.gpass, self.gstop,
self.analog, self.ftype, self.output, self.unit, self.use_filtfilt))
return new_mgrs
def __repr__(self):
ret = self.__dict__.copy()
ret["CLASS"] = self.__class__.__name__
return str(ret)
class SaveToFile(object):
def __init__(self, dir_path, file_name):
self.dir_path = dir_path
self.file_name = file_name
def process(self, mgrs):
if len(mgrs) != 1:
raise("Too many or too few managers, storing to file aborted!")
else:
mgrs[0].save_to_file(self.dir_path, self.file_name)
class Downsample(object):
def __init__(self, factor):
self.factor = factor
def process(self, mgrs):
new_mgrs = []
for mgr in mgrs:
new_mgrs.append(analysis_offline.downsample(mgr, self.factor))
return new_mgrs
def __repr__(self):
ret = self.__dict__.copy()
ret["CLASS"] = self.__class__.__name__
return str(ret)
class ToMvTransform(object):
def __init__(self):
pass
def process(self, p_mgrs):
pass
# for every mgr:
# get its gain and offset and convert its data to microvolts
class Plot(object):
def __init__(self, channel):
self.channel = channel
def process(self, mgrs):
for mgr in mgrs:
analysis_offline.plot(mgr, self.channel)
return mgrs
def __repr__(self):
ret = {}
ret["CLASS"] = self.__class__.__name__
return str(ret)
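# Illustrative sketch only (not part of the original module): every stage above
# exposes process(), so an offline analysis can be expressed as a plain chain in
# which the output of one stage feeds the next. The channel names and the
# downsampling factor below are hypothetical.
def run_example_chain(files):
    """'files' is a list of dicts with 'info', 'data' and 'tags' paths."""
    stages = [ExcludeChannels(['A1', 'A2']), Normalize(norm=2), Downsample(4)]
    mgrs = ReadSignal(files).process()
    for stage in stages:
        mgrs = stage.process(mgrs)
    return mgrs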
|
BrainTech/openbci
|
obci/analysis/p300/chain_analysis_offline.py
|
Python
|
gpl-3.0
| 7,721
|
[
"Gaussian"
] |
426ca6a79b1749bcfce96d7a52e39af7215fc00ca0707c093c80fd97a24af963
|
# -*- coding: utf-8 -*-
"""OpenERP community addons standard plugin for flake8"""
from __future__ import absolute_import
import common_checker
from common_checker.base_checker import BaseChecker
# When OpenERP version 8 API will be frozen
# We will be able to do version toggle here
import v7
__version__ = '0.0.1'
class OpenERPConventionsChecker(object):
"""Check OpenERP conventions
It will call the function 'visit(root_node)' for all checker instances
registered in BaseCheckerMeta
"""
name = 'OpenERP convention'
version = __version__
def __init__(self, tree, filename):
"""Constructor
:param tree: root ast.node of current module
:param filename: current module filename
"""
self.tree = tree if tree else ()
self.filename = filename
self.checks = BaseChecker._checks
def run(self):
"""Run the checks"""
return self.check_tree(self.tree)
def check_tree(self, tree_root):
"""Apply all checks registered in BaseCheckerMeta on root ast.node
:param tree_root: Root ast node of the namespace
:returns: yields error codes
"""
for check in self.checks:
check.set_filename(self.filename)
check.visit(tree_root)
for error in check.errors:
yield error
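# Illustrative sketch only (not part of the original plugin): flake8 constructs
# the checker from a parsed AST plus the filename and then iterates run(); the
# same can be done by hand. The path below is hypothetical, and the shape of the
# yielded error entries depends on the registered BaseChecker subclasses.
def example_run_checker(path):
    import ast
    with open(path) as handle:
        tree = ast.parse(handle.read())
    checker = OpenERPConventionsChecker(tree, path)
    return list(checker.run())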
|
nbessi/openerp-conventions
|
openerp_conventions.py
|
Python
|
mit
| 1,370
|
[
"VisIt"
] |
0283b5c0cbc78a66f82b6342c63cc72c3c0cd3b0b69b7595aaff2bb02cfed375
|
#
#-*- coding:utf-8 -*-
"""
Gentoo-keys - actions.py
Primary api interface module
@copyright: 2012 by Brian Dolbec <dol-sen@gentoo.org>
@license: GNU GPL2, see COPYING for details.
"""
from __future__ import print_function
import os
from gkeys.seedhandler import SeedHandler
from gkeys.lib import GkeysGPG
from gkeys.seed import Seeds
Available_Actions = ['listseed', 'addseed', 'removeseed', 'moveseed',
'listseedfiles', 'listkey', 'addkey', 'removekey', 'movekey',
'installed']
class Actions(object):
'''Primary api actions'''
def __init__(self, config, output=None, logger=None):
self.config = config
self.output = output
self.logger = logger
self.seeds = None
def load_seeds(self, seeds=None, seedfile=None):
if not seeds and not seedfile:
self.logger.error("ACTIONS: load_seeds; no filename to load: "
"setting = %s. Please use the -s option to indicate: which seed "
"file to use." % seedfile)
return None
if seeds:
filepath = self.config.get_key(seeds + "-seedfile")
elif seedfile:
filepath = os.path.join(self.config.get_key('seedsdir'),
'%s.seeds' % seedfile)
self.logger.debug("ACTIONS: load_seeds; seeds filepath to load: "
"%s" % filepath)
seeds = Seeds()
seeds.load(filepath)
return seeds
def listseed(self, args):
'''Action listseed method'''
handler = SeedHandler(self.logger)
kwargs = handler.build_gkeydict(args)
self.logger.debug("ACTIONS: listseed; kwargs: %s" % str(kwargs))
if not self.seeds:
self.seeds = self.load_seeds(args.seeds, args.seedfile)
if self.seeds:
results = self.seeds.list(**kwargs)
return results
return None
def addseed(self, args):
'''Action addseed method'''
handler = SeedHandler(self.logger)
gkey = handler.new(args, checkgkey=True)
gkeys = self.listseed(args)
if len(gkeys) == 0:
self.logger.debug("ACTIONS: addkey; now adding gkey: %s" % str(gkey))
success = self.seeds.add(getattr(gkey, 'nick')[0], gkey)
if success:
success = self.seeds.save()
return ["Successfully added new seed: %s" % str(success), gkey]
else:
messages = ["Matching seeds found in seeds file",
"Aborting... \nMatching seeds:"]
messages.extend(gkeys)
return messages
def removeseed(self, args):
'''Action removeseed method'''
handler = SeedHandler(self.logger)
searchkey = handler.new(args)
self.logger.debug("ACTIONS: removeseed; gkey: %s" % str(searchkey))
gkeys = self.listseed(args)
if not gkeys:
return ["Failed to remove seed: No gkeys returned from listseed()",
None]
if len(gkeys) == 1:
self.logger.debug("ACTIONS: removeseed; now deleting gkey: %s" % str(gkeys))
success = self.seeds.delete(gkeys)
if success:
success = self.seeds.save()
return ["Successfully removed seed: %s" % str(success),
gkeys]
elif len(gkeys):
messages = ["Too many seeds found to remove"]
messages.extend(gkeys)
return messages
return ["Failed to remove seed:", searchkey,
"No matching seed found"]
def moveseed(self, args):
'''Action moveseed method'''
handler = SeedHandler(self.logger)
searchkey = handler.new(args, needkeyid=False, checkintegrity=False)
self.logger.debug("ACTIONS: moveseed; gkey: %s" % str(searchkey))
if not self.seeds:
self.seeds = self.load_seeds(args.seeds)
kwargs = handler.build_gkeydict(args)
sourcekeys = self.seeds.list(**kwargs)
dest = self.load_seeds(args.destination)
destkeys = dest.list(**kwargs)
messages = []
if len(sourcekeys) == 1 and destkeys == []:
self.logger.debug("ACTIONS: moveseed; now adding destination gkey: %s"
% str(sourcekeys[0]))
success = dest.add(sourcekeys[0])
self.logger.debug("ACTIONS: moveseed; success: %s" %str(success))
self.logger.debug("ACTIONS: moveseed; now deleting sourcekey: %s" % str(sourcekeys[0]))
success = self.seeds.delete(sourcekeys[0])
if success:
success = dest.save()
self.logger.debug("ACTIONS: moveseed; destination saved... %s" %str(success))
success = self.seeds.save()
messages.extend(["Successfully Moved %s seed: %s"
% (args.seeds, str(success)), sourcekeys[0]])
return messages
elif len(sourcekeys):
messages = ["Too many seeds found to move"]
messages.extend(sourcekeys)
return messages
messages.append("Failed to move seed:")
messages.append(searchkey)
messages.append('\n')
messages.append("Source seeds found...")
messages.extend(sourcekeys or ["None\n"])
messages.append("Destination seeds found...")
messages.extend(destkeys or ["None\n"])
return messages
def listkey(self, args):
'''Action listkey method'''
self.seeds = self.load_seeds(args.seeds)
if self.seeds:
handler = SeedHandler(self.logger)
kwargs = handler.build_gkeydict(args)
# get the desired seed
keyresults = self.seeds.list(**kwargs)
if keyresults and not args.nick == '*' and self.output:
self.output(keyresults, "\n Found GKEY seeds:")
elif keyresults and self.output:
self.output(['all'], "\n Installed seeds:")
else:
self.logger.info("ACTIONS: listkey; "
"Matching seed entry not found")
if args.nick:
return {"Search failed for: %s" % args.nick: False}
elif args.name:
return {"Search failed for: %s" % args.name: False}
else:
return {"Search failed for search term": False}
# get confirmation
# fill in code here
keydir = self.config.get_key(args.seeds + "-keydir")
self.logger.debug("ACTIONS: listkey; keysdir = %s" % keydir)
self.gpg = GkeysGPG(self.config, keydir)
results = {}
#failed = []
print(" GPG output:")
for key in keyresults:
if not key.keydir and not args.nick == '*':
self.logger.debug("ACTIONS: listkey; NO keydir... Ignoring")
return {"Failed: No keyid's found for %s" % key.name : ''}
self.logger.debug("ACTIONS: listkey; listing keydir:"
+ str(key.keydir))
results[key.name] = self.gpg.list_keys(key.keydir)
if self.config.options['print_results']:
print(results[key.name].output)
self.logger.debug("data output:\n" +
str(results[key.name].output))
#for result in results[key.name].status.data:
#print("key desired:", key.name, ", keydir listed:",
#result)
#self.logger.debug("data record: " + str(result))
else:
return results
return {'done': True}
else:
return {"No keydirs to list": False}
def addkey(self, args):
'''Action addkey method'''
handler = SeedHandler(self.logger)
kwargs = handler.build_gkeydict(args)
self.logger.debug("ACTIONS: listseed; kwargs: %s" % str(kwargs))
self.seeds = self.load_seeds(args.seeds)
if self.seeds:
# get the desired seed
keyresults = self.seeds.list(**kwargs)
if keyresults and not args.nick == '*' and self.output:
self.output(keyresults, "\n Found GKEY seeds:")
elif keyresults and self.output:
self.output(['all'], "\n Installing seeds:")
else:
self.logger.info("ACTIONS: addkey; "
"Matching seed entry not found")
if args.nick:
return {"Search failed for: %s" % args.nick: False}
elif args.name:
return {"Search failed for: %s" % args.name: False}
else:
return {"Search failed for search term": False}
# get confirmation
# fill in code here
keydir = self.config.get_key(args.seeds + "-keydir")
self.logger.debug("ACTIONS: addkey; keysdir = %s" % keydir)
self.gpg = GkeysGPG(self.config, keydir)
results = {}
failed = []
for key in keyresults:
if not key.keyid and not key.longkeyid and not args.nick == '*':
self.logger.debug("ACTIONS: addkey; NO key id's to add... Ignoring")
return {"Failed: No keyid's found for %s" % key.name : ''}
elif not key.keyid and not key.longkeyid:
print("No keyid's found for:", key.nick, key.name, "Skipping...")
failed.append(key)
continue
self.logger.debug("ACTIONS: addkey; adding key:")
self.logger.debug("ACTIONS: " + str(key))
results[key.name] = self.gpg.add_key(key)
for result in results[key.name]:
self.logger.debug("ACTIONS: addkey; result.failed = " +
str(result.failed))
if self.config.options['print_results']:
for result in results[key.name]:
print("key desired:", key.name, ", key added:",
result.username, ", succeeded:",
not result.failed, ", keyid:", result.keyid,
", fingerprint:", result.fingerprint)
self.logger.debug("stderr_out: " + str(result.stderr_out))
if result.failed:
failed.append(key)
if failed and self.output:
self.output(failed, "\n Failed to install:")
return {'Completed': True}
return {"No seeds to search or install": False}
def removekey(self, args):
'''Action removekey method'''
pass
def movekey(self, args):
'''Action movekey method'''
pass
def installed(self, args):
'''Action installed method.
lists the installed key directories'''
pass
def user_confirm(self, message):
'''Get input from the user to confirm to proceed
with the desired action
@param message: string, user prompt message to display
@return boolean: confirmation to proceed or abort
'''
pass
def listseedfiles(self, args):
seedfile = []
seedsdir = self.config.get_key('seedsdir')
for files in os.listdir(seedsdir):
if files.endswith('.seeds'):
seedfile.append(files)
return {"Seed files found at path: %s\n %s"
% (seedsdir, "\n ".join(seedfile)): True}
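# Illustrative sketch only (not part of the original gkeys API): the action
# methods expect an argparse-style namespace; the attribute set below (seeds,
# seedfile, nick, name) and the 'gentoo' seed nick are assumptions made for this
# example, as are the config and logger objects passed in by the caller.
def example_list_seeds(config, logger):
    from argparse import Namespace
    actions = Actions(config, output=print, logger=logger)
    args = Namespace(seeds='gentoo', seedfile=None, nick='*', name=None)
    return actions.listseed(args)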
|
dol-sen/gentoo-keys
|
gkeys/actions.py
|
Python
|
gpl-2.0
| 11,653
|
[
"Brian"
] |
6eea7d507fb25a9538978b03b8171bd7d78e8a47bc6a6021c4577e910a3b8d68
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
@utx.skipIfMissingFeatures("LENNARD_JONES")
class AnalyzeDistributions(ut.TestCase):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
np.random.seed(1234)
num_part = 10
@classmethod
def setUpClass(cls):
box_l = 20.0
# start with a small box
cls.system.box_l = np.array([box_l, box_l, box_l])
cls.system.cell_system.set_n_square(use_verlet_lists=False)
for p in range(cls.num_part):
cls.system.part.add(
id=p,
pos=np.random.random(3) * cls.system.box_l)
def calc_min_distribution(self, bins):
dist = []
for i in range(self.num_part):
dist.append(np.min([self.system.distance(
self.system.part[i], p.pos) for p in self.system.part if p.id != i]))
hist = np.histogram(dist, bins=bins, density=False)[0]
return hist / (float(np.sum(hist)))
# test system.analysis.distribution(), all the same particle types
def test_distribution_lin(self):
# increase PBC to remove mirror images
old_pos = self.system.part[:].pos.copy()
self.system.box_l = self.system.box_l * 2.
self.system.part[:].pos = old_pos
r_min = 0.0
r_max = 100.0
r_bins = 100
bins = np.linspace(r_min, r_max, num=r_bins + 1, endpoint=True)
# no int flag
core_rdf = self.system.analysis.distribution(type_list_a=[0],
type_list_b=[0],
r_min=r_min,
r_max=r_max,
r_bins=r_bins,
log_flag=0,
int_flag=0)
# bins
np.testing.assert_allclose(core_rdf[0], (bins[1:] + bins[:-1]) * 0.5)
# rdf
np.testing.assert_allclose(core_rdf[1],
self.calc_min_distribution(bins))
# with int flag
core_rdf = self.system.analysis.distribution(type_list_a=[0],
type_list_b=[0],
r_min=r_min,
r_max=r_max,
r_bins=r_bins,
log_flag=0,
int_flag=1)
np.testing.assert_allclose(core_rdf[1],
np.cumsum(self.calc_min_distribution(bins)))
if __name__ == "__main__":
ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/analyze_distribution.py
|
Python
|
gpl-3.0
| 3,526
|
[
"ESPResSo"
] |
d34688696f748af7c39c460db054cd7d715d17641cd9c74848deab267f4842d4
|
"""User-friendly public interface to polynomial functions. """
from __future__ import print_function, division
from sympy.core import (
S, Basic, Expr, I, Integer, Add, Mul, Dummy, Tuple
)
from sympy.core.mul import _keep_coeff
from sympy.core.symbol import Symbol
from sympy.core.basic import preorder_traversal
from sympy.core.relational import Relational
from sympy.core.sympify import sympify
from sympy.core.decorators import _sympifyit
from sympy.logic.boolalg import BooleanAtom
from sympy.polys.polyclasses import DMP
from sympy.polys.polyutils import (
basic_from_dict,
_sort_gens,
_unify_gens,
_dict_reorder,
_dict_from_expr,
_parallel_dict_from_expr,
)
from sympy.polys.rationaltools import together
from sympy.polys.rootisolation import dup_isolate_real_roots_list
from sympy.polys.groebnertools import groebner as _groebner
from sympy.polys.fglmtools import matrix_fglm
from sympy.polys.monomials import Monomial
from sympy.polys.orderings import monomial_key
from sympy.polys.polyerrors import (
OperationNotSupported, DomainError,
CoercionFailed, UnificationFailed,
GeneratorsNeeded, PolynomialError,
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
GeneratorsError,
)
from sympy.utilities import group, sift, public
import sympy.polys
import sympy.mpmath
from sympy.mpmath.libmp.libhyper import NoConvergence
from sympy.polys.domains import FF, QQ, ZZ
from sympy.polys.constructor import construct_domain
from sympy.polys import polyoptions as options
from sympy.core.compatibility import iterable
@public
class Poly(Expr):
"""Generic class for representing polynomial expressions. """
__slots__ = ['rep', 'gens']
is_commutative = True
is_Poly = True
def __new__(cls, rep, *gens, **args):
"""Create a new polynomial instance out of something useful. """
opt = options.build_options(gens, args)
if 'order' in opt:
raise NotImplementedError("'order' keyword is not implemented yet")
if iterable(rep, exclude=str):
if isinstance(rep, dict):
return cls._from_dict(rep, opt)
else:
return cls._from_list(list(rep), opt)
else:
rep = sympify(rep)
if rep.is_Poly:
return cls._from_poly(rep, opt)
else:
return cls._from_expr(rep, opt)
@classmethod
def new(cls, rep, *gens):
"""Construct :class:`Poly` instance from raw representation. """
if not isinstance(rep, DMP):
raise PolynomialError(
"invalid polynomial representation: %s" % rep)
elif rep.lev != len(gens) - 1:
raise PolynomialError("invalid arguments: %s, %s" % (rep, gens))
obj = Basic.__new__(cls)
obj.rep = rep
obj.gens = gens
return obj
@classmethod
def from_dict(cls, rep, *gens, **args):
"""Construct a polynomial from a ``dict``. """
opt = options.build_options(gens, args)
return cls._from_dict(rep, opt)
@classmethod
def from_list(cls, rep, *gens, **args):
"""Construct a polynomial from a ``list``. """
opt = options.build_options(gens, args)
return cls._from_list(rep, opt)
@classmethod
def from_poly(cls, rep, *gens, **args):
"""Construct a polynomial from a polynomial. """
opt = options.build_options(gens, args)
return cls._from_poly(rep, opt)
@classmethod
def from_expr(cls, rep, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return cls._from_expr(rep, opt)
@classmethod
def _from_dict(cls, rep, opt):
"""Construct a polynomial from a ``dict``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'dict' without generators")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
for monom, coeff in rep.items():
rep[monom] = domain.convert(coeff)
return cls.new(DMP.from_dict(rep, level, domain), *gens)
@classmethod
def _from_list(cls, rep, opt):
"""Construct a polynomial from a ``list``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'list' without generators")
elif len(gens) != 1:
raise MultivariatePolynomialError(
"'list' representation not supported")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
rep = list(map(domain.convert, rep))
return cls.new(DMP.from_list(rep, level, domain), *gens)
@classmethod
def _from_poly(cls, rep, opt):
"""Construct a polynomial from a polynomial. """
if cls != rep.__class__:
rep = cls.new(rep.rep, *rep.gens)
gens = opt.gens
field = opt.field
domain = opt.domain
if gens and rep.gens != gens:
if set(rep.gens) != set(gens):
return cls._from_expr(rep.as_expr(), opt)
else:
rep = rep.reorder(*gens)
if 'domain' in opt and domain:
rep = rep.set_domain(domain)
elif field is True:
rep = rep.to_field()
return rep
@classmethod
def _from_expr(cls, rep, opt):
"""Construct a polynomial from an expression. """
rep, opt = _dict_from_expr(rep, opt)
return cls._from_dict(rep, opt)
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep, self.gens)
def __hash__(self):
return super(Poly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial expression.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols
set([x])
>>> Poly(x**2 + y).free_symbols
set([x, y])
>>> Poly(x**2 + y, x).free_symbols
set([x, y])
"""
symbols = set([])
for gen in self.gens:
symbols |= gen.free_symbols
return symbols | self.free_symbols_in_domain
@property
def free_symbols_in_domain(self):
"""
Free symbols of the domain of ``self``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols_in_domain
set()
>>> Poly(x**2 + y).free_symbols_in_domain
set()
>>> Poly(x**2 + y, x).free_symbols_in_domain
set([y])
"""
domain, symbols = self.rep.dom, set()
if domain.is_Composite:
for gen in domain.symbols:
symbols |= gen.free_symbols
elif domain.is_EX:
for coeff in self.coeffs():
symbols |= coeff.free_symbols
return symbols
@property
def args(self):
"""
Don't mess up with the core.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).args
(x**2 + 1,)
"""
return (self.as_expr(),)
@property
def gen(self):
"""
Return the principal generator.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).gen
x
"""
return self.gens[0]
@property
def domain(self):
"""Get the ground domain of ``self``. """
return self.get_domain()
@property
def zero(self):
"""Return zero polynomial with ``self``'s properties. """
return self.new(self.rep.zero(self.rep.lev, self.rep.dom), *self.gens)
@property
def one(self):
"""Return one polynomial with ``self``'s properties. """
return self.new(self.rep.one(self.rep.lev, self.rep.dom), *self.gens)
@property
def unit(self):
"""Return unit polynomial with ``self``'s properties. """
return self.new(self.rep.unit(self.rep.lev, self.rep.dom), *self.gens)
def unify(f, g):
"""
Make ``f`` and ``g`` belong to the same domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f, g = Poly(x/2 + 1), Poly(2*x + 1)
>>> f
Poly(1/2*x + 1, x, domain='QQ')
>>> g
Poly(2*x + 1, x, domain='ZZ')
>>> F, G = f.unify(g)
>>> F
Poly(1/2*x + 1, x, domain='QQ')
>>> G
Poly(2*x + 1, x, domain='QQ')
"""
_, per, F, G = f._unify(g)
return per(F), per(G)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if isinstance(f.rep, DMP) and isinstance(g.rep, DMP):
gens = _unify_gens(f.gens, g.gens)
dom, lev = f.rep.dom.unify(g.rep.dom, gens), len(gens) - 1
if f.gens != gens:
f_monoms, f_coeffs = _dict_reorder(
f.rep.to_dict(), f.gens, gens)
if f.rep.dom != dom:
f_coeffs = [dom.convert(c, f.rep.dom) for c in f_coeffs]
F = DMP(dict(list(zip(f_monoms, f_coeffs))), dom, lev)
else:
F = f.rep.convert(dom)
if g.gens != gens:
g_monoms, g_coeffs = _dict_reorder(
g.rep.to_dict(), g.gens, gens)
if g.rep.dom != dom:
g_coeffs = [dom.convert(c, g.rep.dom) for c in g_coeffs]
G = DMP(dict(list(zip(g_monoms, g_coeffs))), dom, lev)
else:
G = g.rep.convert(dom)
else:
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
def per(f, rep, gens=None, remove=None):
"""
Create a Poly out of the given representation.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x, y
>>> from sympy.polys.polyclasses import DMP
>>> a = Poly(x**2 + 1)
>>> a.per(DMP([ZZ(1), ZZ(1)], ZZ), gens=[y])
Poly(y + 1, y, domain='ZZ')
"""
if gens is None:
gens = f.gens
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return f.rep.dom.to_sympy(rep)
return f.__class__.new(rep, *gens)
def set_domain(f, domain):
"""Set the ground domain of ``f``. """
opt = options.build_options(f.gens, {'domain': domain})
return f.per(f.rep.convert(opt.domain))
def get_domain(f):
"""Get the ground domain of ``f``. """
return f.rep.dom
def set_modulus(f, modulus):
"""
Set the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(5*x**2 + 2*x - 1, x).set_modulus(2)
Poly(x**2 + 1, x, modulus=2)
"""
modulus = options.Modulus.preprocess(modulus)
return f.set_domain(FF(modulus))
def get_modulus(f):
"""
Get the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, modulus=2).get_modulus()
2
"""
domain = f.get_domain()
if domain.is_FiniteField:
return Integer(domain.characteristic())
else:
raise PolynomialError("not a polynomial over a Galois field")
def _eval_subs(f, old, new):
"""Internal implementation of :func:`subs`. """
if old in f.gens:
if new.is_number:
return f.eval(old, new)
else:
try:
return f.replace(old, new)
except PolynomialError:
pass
return f.as_expr().subs(old, new)
def exclude(f):
"""
Remove unnecessary generators from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import a, b, c, d, x
>>> Poly(a + x, a, b, c, d, x).exclude()
Poly(a + x, a, x, domain='ZZ')
"""
J, new = f.rep.exclude()
gens = []
for j in range(len(f.gens)):
if j not in J:
gens.append(f.gens[j])
return f.per(new, gens=gens)
def replace(f, x, y=None):
"""
Replace ``x`` with ``y`` in generators list.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1, x).replace(x, y)
Poly(y**2 + 1, y, domain='ZZ')
"""
if y is None:
if f.is_univariate:
x, y = f.gen, x
else:
raise PolynomialError(
"syntax supported only in univariate case")
if x == y:
return f
if x in f.gens and y not in f.gens:
dom = f.get_domain()
if not dom.is_Composite or y not in dom.symbols:
gens = list(f.gens)
gens[gens.index(x)] = y
return f.per(f.rep, gens=gens)
raise PolynomialError("can't replace %s with %s in %s" % (x, y, f))
def reorder(f, *gens, **args):
"""
Efficiently apply new order of generators.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y**2, x, y).reorder(y, x)
Poly(y**2*x + x**2, y, x, domain='ZZ')
"""
opt = options.Options((), args)
if not gens:
gens = _sort_gens(f.gens, opt=opt)
elif set(f.gens) != set(gens):
raise PolynomialError(
"generators list can differ only up to order of elements")
rep = dict(list(zip(*_dict_reorder(f.rep.to_dict(), f.gens, gens))))
return f.per(DMP(rep, f.rep.dom, len(gens) - 1), gens=gens)
def ltrim(f, gen):
"""
Remove dummy generators from the "left" of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(y**2 + y*z**2, x, y, z).ltrim(y)
Poly(y**2 + y*z**2, y, z, domain='ZZ')
"""
rep = f.as_dict(native=True)
j = f._gen_to_level(gen)
terms = {}
for monom, coeff in rep.items():
monom = monom[j:]
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError("can't left trim %s" % f)
gens = f.gens[j:]
return f.new(DMP.from_dict(terms, len(gens) - 1, f.rep.dom), *gens)
def has_only_gens(f, *gens):
"""
Return ``True`` if ``Poly(f, *gens)`` retains ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x*y + 1, x, y, z).has_only_gens(x, y)
True
>>> Poly(x*y + z, x, y, z).has_only_gens(x, y)
False
"""
indices = set([])
for gen in gens:
try:
index = f.gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
indices.add(index)
for monom in f.monoms():
for i, elt in enumerate(monom):
if i not in indices and elt:
return False
return True
def to_ring(f):
"""
Make the ground domain a ring.
Examples
========
>>> from sympy import Poly, QQ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, domain=QQ).to_ring()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'to_ring'):
result = f.rep.to_ring()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_ring')
return f.per(result)
def to_field(f):
"""
Make the ground domain a field.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x, domain=ZZ).to_field()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_field'):
result = f.rep.to_field()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_field')
return f.per(result)
def to_exact(f):
"""
Make the ground domain exact.
Examples
========
>>> from sympy import Poly, RR
>>> from sympy.abc import x
>>> Poly(x**2 + 1.0, x, domain=RR).to_exact()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_exact'):
result = f.rep.to_exact()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_exact')
return f.per(result)
def retract(f, field=None):
"""
Recalculate the ground domain of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x, domain='QQ[y]')
>>> f
Poly(x**2 + 1, x, domain='QQ[y]')
>>> f.retract()
Poly(x**2 + 1, x, domain='ZZ')
>>> f.retract(field=True)
Poly(x**2 + 1, x, domain='QQ')
"""
dom, rep = construct_domain(f.as_dict(zero=True),
field=field, composite=f.domain.is_Composite or None)
return f.from_dict(rep, f.gens, domain=dom)
def slice(f, x, m, n=None):
"""Take a continuous subsequence of terms of ``f``. """
if n is None:
j, m, n = 0, x, m
else:
j = f._gen_to_level(x)
m, n = int(m), int(n)
if hasattr(f.rep, 'slice'):
result = f.rep.slice(m, n, j)
else: # pragma: no cover
raise OperationNotSupported(f, 'slice')
return f.per(result)
def coeffs(f, order=None):
"""
Returns all non-zero coefficients from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x + 3, x).coeffs()
[1, 2, 3]
See Also
========
all_coeffs
coeff_monomial
nth
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.coeffs(order=order)]
def monoms(f, order=None):
"""
Returns all non-zero monomials from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).monoms()
[(2, 0), (1, 2), (1, 1), (0, 1)]
See Also
========
all_monoms
"""
return f.rep.monoms(order=order)
def terms(f, order=None):
"""
Returns all non-zero terms from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).terms()
[((2, 0), 1), ((1, 2), 2), ((1, 1), 1), ((0, 1), 3)]
See Also
========
all_terms
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.terms(order=order)]
def all_coeffs(f):
"""
Returns all coefficients from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_coeffs()
[1, 0, 2, -1]
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.all_coeffs()]
def all_monoms(f):
"""
Returns all monomials from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_monoms()
[(3,), (2,), (1,), (0,)]
See Also
========
all_terms
"""
return f.rep.all_monoms()
def all_terms(f):
"""
Returns all terms from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_terms()
[((3,), 1), ((2,), 0), ((1,), 2), ((0,), -1)]
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.all_terms()]
def termwise(f, func, *gens, **args):
"""
Apply a function to all terms of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> def func(k, coeff):
... k = k[0]
... return coeff//10**(2-k)
>>> Poly(x**2 + 20*x + 400).termwise(func)
Poly(x**2 + 2*x + 4, x, domain='ZZ')
"""
terms = {}
for monom, coeff in f.terms():
result = func(monom, coeff)
if isinstance(result, tuple):
monom, coeff = result
else:
coeff = result
if coeff:
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError(
"%s monomial was generated twice" % monom)
return f.from_dict(terms, *(gens or f.gens), **args)
def length(f):
"""
Returns the number of non-zero terms in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x - 1).length()
3
"""
return len(f.as_dict())
def as_dict(f, native=False, zero=False):
"""
Switch to a ``dict`` representation.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 - y, x, y).as_dict()
{(0, 1): -1, (1, 2): 2, (2, 0): 1}
"""
if native:
return f.rep.to_dict(zero=zero)
else:
return f.rep.to_sympy_dict(zero=zero)
def as_list(f, native=False):
"""Switch to a ``list`` representation. """
if native:
return f.rep.to_list()
else:
return f.rep.to_sympy_list()
def as_expr(f, *gens):
"""
Convert a Poly instance to an Expr instance.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2 + 2*x*y**2 - y, x, y)
>>> f.as_expr()
x**2 + 2*x*y**2 - y
>>> f.as_expr({x: 5})
10*y**2 - y + 25
>>> f.as_expr(5, 6)
379
"""
if not gens:
gens = f.gens
elif len(gens) == 1 and isinstance(gens[0], dict):
mapping = gens[0]
gens = list(f.gens)
for gen, value in mapping.items():
try:
index = gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
gens[index] = value
return basic_from_dict(f.rep.to_sympy_dict(), *gens)
def lift(f):
"""
Convert algebraic coefficients to rationals.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**2 + I*x + 1, x, extension=I).lift()
Poly(x**4 + 3*x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'lift'):
result = f.rep.lift()
else: # pragma: no cover
raise OperationNotSupported(f, 'lift')
return f.per(result)
def deflate(f):
"""
Reduce degree of ``f`` by mapping ``x_i**m`` to ``y_i``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3 + 1, x, y).deflate()
((3, 2), Poly(x**2*y + x + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'deflate'):
J, result = f.rep.deflate()
else: # pragma: no cover
raise OperationNotSupported(f, 'deflate')
return J, f.per(result)
def inject(f, front=False):
"""
Inject ground domain generators into ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x)
>>> f.inject()
Poly(x**2*y + x*y**3 + x*y + 1, x, y, domain='ZZ')
>>> f.inject(front=True)
Poly(y**3*x + y*x**2 + y*x + 1, y, x, domain='ZZ')
"""
dom = f.rep.dom
if dom.is_Numerical:
return f
elif not dom.is_Poly:
raise DomainError("can't inject generators over %s" % dom)
if hasattr(f.rep, 'inject'):
result = f.rep.inject(front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'inject')
if front:
gens = dom.symbols + f.gens
else:
gens = f.gens + dom.symbols
return f.new(result, *gens)
def eject(f, *gens):
"""
Eject selected generators into the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
>>> f.eject(x)
Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
>>> f.eject(y)
Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
"""
dom = f.rep.dom
if not dom.is_Numerical:
raise DomainError("can't eject generators over %s" % dom)
n, k = len(f.gens), len(gens)
if f.gens[:k] == gens:
_gens, front = f.gens[k:], True
elif f.gens[-k:] == gens:
_gens, front = f.gens[:-k], False
else:
raise NotImplementedError(
"can only eject front or back generators")
dom = dom.inject(*gens)
if hasattr(f.rep, 'eject'):
result = f.rep.eject(dom, front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'eject')
return f.new(result, *_gens)
def terms_gcd(f):
"""
Remove GCD of terms from the polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3*y, x, y).terms_gcd()
((3, 1), Poly(x**3*y + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'terms_gcd'):
J, result = f.rep.terms_gcd()
else: # pragma: no cover
raise OperationNotSupported(f, 'terms_gcd')
return J, f.per(result)
def add_ground(f, coeff):
"""
Add an element of the ground domain to ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).add_ground(2)
Poly(x + 3, x, domain='ZZ')
"""
if hasattr(f.rep, 'add_ground'):
result = f.rep.add_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'add_ground')
return f.per(result)
def sub_ground(f, coeff):
"""
Subtract an element of the ground domain from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).sub_ground(2)
Poly(x - 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'sub_ground'):
result = f.rep.sub_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub_ground')
return f.per(result)
def mul_ground(f, coeff):
"""
Multiply ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).mul_ground(2)
Poly(2*x + 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'mul_ground'):
result = f.rep.mul_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul_ground')
return f.per(result)
def quo_ground(f, coeff):
"""
Quotient of ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).quo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).quo_ground(2)
Poly(x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'quo_ground'):
result = f.rep.quo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo_ground')
return f.per(result)
def exquo_ground(f, coeff):
"""
Exact quotient of ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).exquo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).exquo_ground(2)
Traceback (most recent call last):
...
ExactQuotientFailed: 2 does not divide 3 in ZZ
"""
if hasattr(f.rep, 'exquo_ground'):
result = f.rep.exquo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo_ground')
return f.per(result)
def abs(f):
"""
Make all coefficients in ``f`` positive.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).abs()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'abs'):
result = f.rep.abs()
else: # pragma: no cover
raise OperationNotSupported(f, 'abs')
return f.per(result)
def neg(f):
"""
Negate all coefficients in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).neg()
Poly(-x**2 + 1, x, domain='ZZ')
>>> -Poly(x**2 - 1, x)
Poly(-x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'neg'):
result = f.rep.neg()
else: # pragma: no cover
raise OperationNotSupported(f, 'neg')
return f.per(result)
def add(f, g):
"""
Add two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).add(Poly(x - 2, x))
Poly(x**2 + x - 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x) + Poly(x - 2, x)
Poly(x**2 + x - 1, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.add_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'add'):
result = F.add(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'add')
return per(result)
def sub(f, g):
"""
Subtract two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).sub(Poly(x - 2, x))
Poly(x**2 - x + 3, x, domain='ZZ')
>>> Poly(x**2 + 1, x) - Poly(x - 2, x)
Poly(x**2 - x + 3, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.sub_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'sub'):
result = F.sub(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub')
return per(result)
def mul(f, g):
"""
Multiply two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).mul(Poly(x - 2, x))
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x)*Poly(x - 2, x)
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.mul_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'mul'):
result = F.mul(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul')
return per(result)
def sqr(f):
"""
Square a polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).sqr()
Poly(x**2 - 4*x + 4, x, domain='ZZ')
>>> Poly(x - 2, x)**2
Poly(x**2 - 4*x + 4, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqr'):
result = f.rep.sqr()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqr')
return f.per(result)
def pow(f, n):
"""
Raise ``f`` to a non-negative power ``n``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).pow(3)
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
>>> Poly(x - 2, x)**3
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
"""
n = int(n)
if hasattr(f.rep, 'pow'):
result = f.rep.pow(n)
else: # pragma: no cover
raise OperationNotSupported(f, 'pow')
return f.per(result)
def pdiv(f, g):
"""
Polynomial pseudo-division of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pdiv(Poly(2*x - 4, x))
(Poly(2*x + 4, x, domain='ZZ'), Poly(20, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pdiv'):
q, r = F.pdiv(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pdiv')
return per(q), per(r)
def prem(f, g):
"""
Polynomial pseudo-remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).prem(Poly(2*x - 4, x))
Poly(20, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'prem'):
result = F.prem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'prem')
return per(result)
def pquo(f, g):
"""
Polynomial pseudo-quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pquo(Poly(2*x - 4, x))
Poly(2*x + 4, x, domain='ZZ')
>>> Poly(x**2 - 1, x).pquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pquo'):
result = F.pquo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pquo')
return per(result)
def pexquo(f, g):
"""
Polynomial exact pseudo-quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).pexquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x).pexquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pexquo'):
try:
result = F.pexquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'pexquo')
return per(result)
def div(f, g, auto=True):
"""
Polynomial division with remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x))
(Poly(1/2*x + 1, x, domain='QQ'), Poly(5, x, domain='QQ'))
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x), auto=False)
(Poly(0, x, domain='ZZ'), Poly(x**2 + 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'div'):
q, r = F.div(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'div')
if retract:
try:
Q, R = q.to_ring(), r.to_ring()
except CoercionFailed:
pass
else:
q, r = Q, R
return per(q), per(r)
def rem(f, g, auto=True):
"""
Computes the polynomial remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x))
Poly(5, x, domain='ZZ')
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x), auto=False)
Poly(x**2 + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'rem'):
r = F.rem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'rem')
if retract:
try:
r = r.to_ring()
except CoercionFailed:
pass
return per(r)
def quo(f, g, auto=True):
"""
Computes polynomial quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).quo(Poly(2*x - 4, x))
Poly(1/2*x + 1, x, domain='QQ')
>>> Poly(x**2 - 1, x).quo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'quo'):
q = F.quo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def exquo(f, g, auto=True):
"""
Computes polynomial exact quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).exquo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x).exquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.has_Ring and not dom.has_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'exquo'):
try:
q = F.exquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def _gen_to_level(f, gen):
"""Returns level associated with the given generator. """
if isinstance(gen, int):
length = len(f.gens)
if -length <= gen < length:
if gen < 0:
return length + gen
else:
return gen
else:
raise PolynomialError("-%s <= gen < %s expected, got %s" %
(length, length, gen))
else:
try:
return f.gens.index(sympify(gen))
except ValueError:
raise PolynomialError(
"a valid generator expected, got %s" % gen)
def degree(f, gen=0):
"""
Returns degree of ``f`` in ``x_j``.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree()
2
>>> Poly(x**2 + y*x + y, x, y).degree(y)
1
>>> Poly(0, x).degree()
-oo
"""
j = f._gen_to_level(gen)
if hasattr(f.rep, 'degree'):
return f.rep.degree(j)
else: # pragma: no cover
raise OperationNotSupported(f, 'degree')
def degree_list(f):
"""
Returns a list of degrees of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree_list()
(2, 1)
"""
if hasattr(f.rep, 'degree_list'):
return f.rep.degree_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'degree_list')
def total_degree(f):
"""
Returns the total degree of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).total_degree()
2
>>> Poly(x + y**5, x, y).total_degree()
5
"""
if hasattr(f.rep, 'total_degree'):
return f.rep.total_degree()
else: # pragma: no cover
raise OperationNotSupported(f, 'total_degree')
def homogenize(f, s):
"""
Returns the homogeneous polynomial of ``f``.
        A homogeneous polynomial is a polynomial all of whose monomials with
        non-zero coefficients have the same total degree. If you only
want to check if a polynomial is homogeneous, then use
:func:`Poly.is_homogeneous`. If you want not only to check if a
polynomial is homogeneous but also compute its homogeneous order,
then use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(x**5 + 2*x**2*y**2 + 9*x*y**3)
>>> f.homogenize(z)
Poly(x**5 + 2*x**2*y**2*z + 9*x*y**3*z, x, y, z, domain='ZZ')
"""
if not isinstance(s, Symbol):
raise TypeError("``Symbol`` expected, got %s" % type(s))
if s in f.gens:
i = f.gens.index(s)
gens = f.gens
else:
i = len(f.gens)
gens = f.gens + (s,)
if hasattr(f.rep, 'homogenize'):
return f.per(f.rep.homogenize(i), gens=gens)
        raise OperationNotSupported(f, 'homogenize')
def homogeneous_order(f):
"""
Returns the homogeneous order of ``f``.
        A homogeneous polynomial is a polynomial all of whose monomials with
        non-zero coefficients have the same total degree. This degree is
the homogeneous order of ``f``. If you only want to check if a
polynomial is homogeneous, then use :func:`Poly.is_homogeneous`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**5 + 2*x**3*y**2 + 9*x*y**4)
>>> f.homogeneous_order()
5
"""
if hasattr(f.rep, 'homogeneous_order'):
return f.rep.homogeneous_order()
else: # pragma: no cover
raise OperationNotSupported(f, 'homogeneous_order')
def LC(f, order=None):
"""
Returns the leading coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(4*x**3 + 2*x**2 + 3*x, x).LC()
4
"""
if order is not None:
return f.coeffs(order)[0]
if hasattr(f.rep, 'LC'):
result = f.rep.LC()
else: # pragma: no cover
raise OperationNotSupported(f, 'LC')
return f.rep.dom.to_sympy(result)
def TC(f):
"""
Returns the trailing coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).TC()
0
"""
if hasattr(f.rep, 'TC'):
result = f.rep.TC()
else: # pragma: no cover
raise OperationNotSupported(f, 'TC')
return f.rep.dom.to_sympy(result)
def EC(f, order=None):
"""
Returns the last non-zero coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).EC()
3
"""
if hasattr(f.rep, 'coeffs'):
return f.coeffs(order)[-1]
else: # pragma: no cover
raise OperationNotSupported(f, 'EC')
def coeff_monomial(f, monom):
"""
        Returns the coefficient of ``monom`` in ``f``, if present, else None.
Examples
========
>>> from sympy import Poly, exp
>>> from sympy.abc import x, y
>>> p = Poly(24*x*y*exp(8) + 23*x, x, y)
>>> p.coeff_monomial(x)
23
>>> p.coeff_monomial(y)
0
>>> p.coeff_monomial(x*y)
24*exp(8)
Note that ``Expr.coeff()`` behaves differently, collecting terms
if possible; the Poly must be converted to an Expr to use that
method, however:
>>> p.as_expr().coeff(x)
24*y*exp(8) + 23
>>> p.as_expr().coeff(y)
24*x*exp(8)
>>> p.as_expr().coeff(x*y)
24*exp(8)
See Also
========
nth: more efficient query using exponents of the monomial's generators
"""
return f.nth(*Monomial(monom, f.gens).exponents)
def nth(f, *N):
"""
Returns the ``n``-th coefficient of ``f`` where ``N`` are the
exponents of the generators in the term of interest.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x, y
>>> Poly(x**3 + 2*x**2 + 3*x, x).nth(2)
2
>>> Poly(x**3 + 2*x*y**2 + y**2, x, y).nth(1, 2)
2
>>> Poly(4*sqrt(x)*y)
Poly(4*y*sqrt(x), y, sqrt(x), domain='ZZ')
>>> _.nth(1, 1)
4
See Also
========
coeff_monomial
"""
if hasattr(f.rep, 'nth'):
result = f.rep.nth(*list(map(int, N)))
else: # pragma: no cover
raise OperationNotSupported(f, 'nth')
return f.rep.dom.to_sympy(result)
def coeff(f, x, n=1, right=False):
# the semantics of coeff_monomial and Expr.coeff are different;
# if someone is working with a Poly, they should be aware of the
        # differences and choose the method best suited for the query.
# Alternatively, a pure-polys method could be written here but
# at this time the ``right`` keyword would be ignored because Poly
# doesn't work with non-commutatives.
raise NotImplementedError(
'Either convert to Expr with `as_expr` method '
'to use Expr\'s coeff method or else use the '
'`coeff_monomial` method of Polys.')
def LM(f, order=None):
"""
Returns the leading monomial of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LM()
x**2*y**0
"""
return Monomial(f.monoms(order)[0], f.gens)
def EM(f, order=None):
"""
Returns the last non-zero monomial of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).EM()
x**0*y**1
"""
return Monomial(f.monoms(order)[-1], f.gens)
def LT(f, order=None):
"""
Returns the leading term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LT()
(x**2*y**0, 4)
"""
monom, coeff = f.terms(order)[0]
return Monomial(monom, f.gens), coeff
def ET(f, order=None):
"""
Returns the last non-zero term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).ET()
(x**0*y**1, 3)
"""
monom, coeff = f.terms(order)[-1]
return Monomial(monom, f.gens), coeff
def max_norm(f):
"""
Returns maximum norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).max_norm()
3
"""
if hasattr(f.rep, 'max_norm'):
result = f.rep.max_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'max_norm')
return f.rep.dom.to_sympy(result)
def l1_norm(f):
"""
Returns l1 norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).l1_norm()
6
"""
if hasattr(f.rep, 'l1_norm'):
result = f.rep.l1_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'l1_norm')
return f.rep.dom.to_sympy(result)
def clear_denoms(f, convert=False):
"""
Clear denominators, but keep the ground domain.
Examples
========
>>> from sympy import Poly, S, QQ
>>> from sympy.abc import x
>>> f = Poly(x/2 + S(1)/3, x, domain=QQ)
>>> f.clear_denoms()
(6, Poly(3*x + 2, x, domain='QQ'))
>>> f.clear_denoms(convert=True)
(6, Poly(3*x + 2, x, domain='ZZ'))
"""
if not f.rep.dom.has_Field:
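            # Over a ring (e.g. ZZ) coefficients carry no denominators, so
            # there is nothing to clear.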
return S.One, f
dom = f.get_domain()
if dom.has_assoc_Ring:
dom = f.rep.dom.get_ring()
if hasattr(f.rep, 'clear_denoms'):
coeff, result = f.rep.clear_denoms()
else: # pragma: no cover
raise OperationNotSupported(f, 'clear_denoms')
coeff, f = dom.to_sympy(coeff), f.per(result)
if not convert or not dom.has_assoc_Ring:
return coeff, f
else:
return coeff, f.to_ring()
def rat_clear_denoms(f, g):
"""
Clear denominators in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2/y + 1, x)
>>> g = Poly(x**3 + y, x)
>>> p, q = f.rat_clear_denoms(g)
>>> p
Poly(x**2 + y, x, domain='ZZ[y]')
>>> q
Poly(y*x**3 + y**2, x, domain='ZZ[y]')
"""
dom, per, f, g = f._unify(g)
f = per(f)
g = per(g)
if not (dom.has_Field and dom.has_assoc_Ring):
return f, g
a, f = f.clear_denoms(convert=True)
b, g = g.clear_denoms(convert=True)
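        # Cross-multiply by the cleared denominators so the ratio is
        # preserved: (b*f)/(a*g) equals the original rational function.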
f = f.mul_ground(b)
g = g.mul_ground(a)
return f, g
def integrate(f, *specs, **args):
"""
Computes indefinite integral of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).integrate()
Poly(1/3*x**3 + x**2 + x, x, domain='QQ')
>>> Poly(x*y**2 + x, x, y).integrate((0, 1), (1, 0))
Poly(1/2*x**2*y**2 + 1/2*x**2, x, y, domain='QQ')
"""
if args.get('auto', True) and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'integrate'):
if not specs:
return f.per(f.rep.integrate(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.integrate(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'integrate')
def diff(f, *specs):
"""
Computes partial derivative of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).diff()
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x*y**2 + x, x, y).diff((0, 0), (1, 1))
Poly(2*x*y, x, y, domain='ZZ')
"""
if hasattr(f.rep, 'diff'):
if not specs:
return f.per(f.rep.diff(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.diff(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'diff')
def eval(f, x, a=None, auto=True):
"""
Evaluate ``f`` at ``a`` in the given variable.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x**2 + 2*x + 3, x).eval(2)
11
>>> Poly(2*x*y + 3*x + y + 2, x, y).eval(x, 2)
Poly(5*y + 8, y, domain='ZZ')
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f.eval({x: 2})
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f.eval({x: 2, y: 5})
Poly(2*z + 31, z, domain='ZZ')
>>> f.eval({x: 2, y: 5, z: 7})
45
>>> f.eval((2, 5))
Poly(2*z + 31, z, domain='ZZ')
>>> f(2, 5)
Poly(2*z + 31, z, domain='ZZ')
"""
if a is None:
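            # A single argument may be a value for the first generator, a
            # dict mapping generators to values, or a sequence of values
            # matched against the leading generators.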
if isinstance(x, dict):
mapping = x
for gen, value in mapping.items():
f = f.eval(gen, value)
return f
elif isinstance(x, (tuple, list)):
values = x
if len(values) > len(f.gens):
raise ValueError("too many values provided")
for gen, value in zip(f.gens, values):
f = f.eval(gen, value)
return f
else:
j, a = 0, x
else:
j = f._gen_to_level(x)
if not hasattr(f.rep, 'eval'): # pragma: no cover
raise OperationNotSupported(f, 'eval')
try:
result = f.rep.eval(a, j)
except CoercionFailed:
if not auto:
raise DomainError("can't evaluate at %s in %s" % (a, f.rep.dom))
else:
a_domain, [a] = construct_domain([a])
new_domain = f.get_domain().unify_with_symbols(a_domain, f.gens)
f = f.set_domain(new_domain)
a = new_domain.convert(a, a_domain)
result = f.rep.eval(a, j)
return f.per(result, remove=j)
def __call__(f, *values):
"""
        Evaluate ``f`` at the given values.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f(2)
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f(2, 5)
Poly(2*z + 31, z, domain='ZZ')
>>> f(2, 5, 7)
45
"""
return f.eval(values)
def half_gcdex(f, g, auto=True):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).half_gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'), Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'half_gcdex'):
s, h = F.half_gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'half_gcdex')
return per(s), per(h)
def gcdex(f, g, auto=True):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'),
Poly(1/5*x**2 - 6/5*x + 2, x, domain='QQ'),
Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'gcdex'):
s, t, h = F.gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcdex')
return per(s), per(t), per(h)
def invert(f, g, auto=True):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).invert(Poly(2*x - 1, x))
Poly(-4/3, x, domain='QQ')
>>> Poly(x**2 - 1, x).invert(Poly(x - 1, x))
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
dom, per, F, G = f._unify(g)
if auto and dom.has_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'invert'):
result = F.invert(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'invert')
return per(result)
def revert(f, n):
"""Compute ``f**(-1)`` mod ``x**n``. """
if hasattr(f.rep, 'revert'):
result = f.rep.revert(int(n))
else: # pragma: no cover
raise OperationNotSupported(f, 'revert')
return f.per(result)
def subresultants(f, g):
"""
Computes the subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).subresultants(Poly(x**2 - 1, x))
[Poly(x**2 + 1, x, domain='ZZ'),
Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')]
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'subresultants'):
result = F.subresultants(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'subresultants')
return list(map(per, result))
def resultant(f, g, includePRS=False):
"""
Computes the resultant of ``f`` and ``g`` via PRS.
If includePRS=True, it includes the subresultant PRS in the result.
Because the PRS is used to calculate the resultant, this is more
efficient than calling :func:`subresultants` separately.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x)
>>> f.resultant(Poly(x**2 - 1, x))
4
>>> f.resultant(Poly(x**2 - 1, x), includePRS=True)
(4, [Poly(x**2 + 1, x, domain='ZZ'), Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')])
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'resultant'):
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'resultant')
if includePRS:
return (per(result, remove=0), list(map(per, R)))
return per(result, remove=0)
def discriminant(f):
"""
Computes the discriminant of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x + 3, x).discriminant()
-8
"""
if hasattr(f.rep, 'discriminant'):
result = f.rep.discriminant()
else: # pragma: no cover
raise OperationNotSupported(f, 'discriminant')
return f.per(result, remove=0)
def dispersionset(f, g=None):
r"""Compute the *dispersion set* of two polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion set `\operatorname{J}(f, g)` is defined as:
.. math::
\operatorname{J}(f, g)
& := \{a \in \mathbb{N}_0 | \gcd(f(x), g(x+a)) \neq 1\} \\
& = \{a \in \mathbb{N}_0 | \deg \gcd(f(x), g(x+a)) \geq 1\}
For a single polynomial one defines `\operatorname{J}(f) := \operatorname{J}(f, f)`.
Examples
========
>>> from sympy import poly
>>> from sympy.polys.dispersion import dispersion, dispersionset
>>> from sympy.abc import x
Dispersion set and dispersion of a simple polynomial:
>>> fp = poly((x - 3)*(x + 3), x)
>>> sorted(dispersionset(fp))
[0, 6]
>>> dispersion(fp)
6
Note that the definition of the dispersion is not symmetric:
>>> fp = poly(x**4 - 3*x**2 + 1, x)
>>> gp = fp.shift(-3)
>>> sorted(dispersionset(fp, gp))
[2, 3, 4]
>>> dispersion(fp, gp)
4
>>> sorted(dispersionset(gp, fp))
[]
>>> dispersion(gp, fp)
-oo
Computing the dispersion also works over field extensions:
>>> from sympy import sqrt
>>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
>>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
>>> sorted(dispersionset(fp, gp))
[2]
>>> sorted(dispersionset(gp, fp))
[1, 4]
We can even perform the computations for polynomials
having symbolic coefficients:
>>> from sympy.abc import a
>>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
>>> sorted(dispersionset(fp))
[0, 1]
See Also
========
dispersion
References
==========
1. [ManWright94]_
2. [Koepf98]_
3. [Abramov71]_
4. [Man93]_
"""
from sympy.polys.dispersion import dispersionset
return dispersionset(f, g)
def dispersion(f, g=None):
r"""Compute the *dispersion* of polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion `\operatorname{dis}(f, g)` is defined as:
.. math::
\operatorname{dis}(f, g)
& := \max\{ J(f,g) \cup \{0\} \} \\
& = \max\{ \{a \in \mathbb{N} | \gcd(f(x), g(x+a)) \neq 1\} \cup \{0\} \}
and for a single polynomial `\operatorname{dis}(f) := \operatorname{dis}(f, f)`.
Examples
========
>>> from sympy import poly
>>> from sympy.polys.dispersion import dispersion, dispersionset
>>> from sympy.abc import x
Dispersion set and dispersion of a simple polynomial:
>>> fp = poly((x - 3)*(x + 3), x)
>>> sorted(dispersionset(fp))
[0, 6]
>>> dispersion(fp)
6
Note that the definition of the dispersion is not symmetric:
>>> fp = poly(x**4 - 3*x**2 + 1, x)
>>> gp = fp.shift(-3)
>>> sorted(dispersionset(fp, gp))
[2, 3, 4]
>>> dispersion(fp, gp)
4
>>> sorted(dispersionset(gp, fp))
[]
>>> dispersion(gp, fp)
-oo
Computing the dispersion also works over field extensions:
>>> from sympy import sqrt
>>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
>>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
>>> sorted(dispersionset(fp, gp))
[2]
>>> sorted(dispersionset(gp, fp))
[1, 4]
We can even perform the computations for polynomials
having symbolic coefficients:
>>> from sympy.abc import a
>>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
>>> sorted(dispersionset(fp))
[0, 1]
See Also
========
dispersionset
References
==========
1. [ManWright94]_
2. [Koepf98]_
3. [Abramov71]_
4. [Man93]_
"""
from sympy.polys.dispersion import dispersion
return dispersion(f, g)
def cofactors(f, g):
"""
Returns the GCD of ``f`` and ``g`` and their cofactors.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
        ``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).cofactors(Poly(x**2 - 3*x + 2, x))
(Poly(x - 1, x, domain='ZZ'),
Poly(x + 1, x, domain='ZZ'),
Poly(x - 2, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'cofactors'):
h, cff, cfg = F.cofactors(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'cofactors')
return per(h), per(cff), per(cfg)
def gcd(f, g):
"""
Returns the polynomial GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).gcd(Poly(x**2 - 3*x + 2, x))
Poly(x - 1, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'gcd'):
result = F.gcd(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcd')
return per(result)
def lcm(f, g):
"""
Returns polynomial LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).lcm(Poly(x**2 - 3*x + 2, x))
Poly(x**3 - 2*x**2 - x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'lcm'):
result = F.lcm(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'lcm')
return per(result)
def trunc(f, p):
"""
Reduce ``f`` modulo a constant ``p``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 + 3*x**2 + 5*x + 7, x).trunc(3)
Poly(-x**3 - x + 1, x, domain='ZZ')
"""
p = f.rep.dom.convert(p)
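        # Coefficients are reduced to the symmetric residue system modulo p,
        # e.g. modulo 3 they are taken from {-1, 0, 1} (cf. the doctest above).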
if hasattr(f.rep, 'trunc'):
result = f.rep.trunc(p)
else: # pragma: no cover
raise OperationNotSupported(f, 'trunc')
return f.per(result)
def monic(f, auto=True):
"""
Divides all coefficients by ``LC(f)``.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(3*x**2 + 6*x + 9, x, domain=ZZ).monic()
Poly(x**2 + 2*x + 3, x, domain='QQ')
>>> Poly(3*x**2 + 4*x + 2, x, domain=ZZ).monic()
Poly(x**2 + 4/3*x + 2/3, x, domain='QQ')
"""
if auto and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'monic'):
result = f.rep.monic()
else: # pragma: no cover
raise OperationNotSupported(f, 'monic')
return f.per(result)
def content(f):
"""
Returns the GCD of polynomial coefficients.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(6*x**2 + 8*x + 12, x).content()
2
"""
if hasattr(f.rep, 'content'):
result = f.rep.content()
else: # pragma: no cover
raise OperationNotSupported(f, 'content')
return f.rep.dom.to_sympy(result)
def primitive(f):
"""
Returns the content and a primitive form of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 8*x + 12, x).primitive()
(2, Poly(x**2 + 4*x + 6, x, domain='ZZ'))
"""
if hasattr(f.rep, 'primitive'):
cont, result = f.rep.primitive()
else: # pragma: no cover
raise OperationNotSupported(f, 'primitive')
return f.rep.dom.to_sympy(cont), f.per(result)
def compose(f, g):
"""
Computes the functional composition of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x, x).compose(Poly(x - 1, x))
Poly(x**2 - x, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'compose'):
result = F.compose(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'compose')
return per(result)
def decompose(f):
"""
Computes a functional decomposition of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**4 + 2*x**3 - x - 1, x, domain='ZZ').decompose()
[Poly(x**2 - x - 1, x, domain='ZZ'), Poly(x**2 + x, x, domain='ZZ')]
"""
if hasattr(f.rep, 'decompose'):
result = f.rep.decompose()
else: # pragma: no cover
raise OperationNotSupported(f, 'decompose')
return list(map(f.per, result))
def shift(f, a):
"""
Efficiently compute Taylor shift ``f(x + a)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).shift(2)
Poly(x**2 + 2*x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'shift'):
result = f.rep.shift(a)
else: # pragma: no cover
raise OperationNotSupported(f, 'shift')
return f.per(result)
def sturm(f, auto=True):
"""
Computes the Sturm sequence of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 2*x**2 + x - 3, x).sturm()
[Poly(x**3 - 2*x**2 + x - 3, x, domain='QQ'),
Poly(3*x**2 - 4*x + 1, x, domain='QQ'),
Poly(2/9*x + 25/9, x, domain='QQ'),
Poly(-2079/4, x, domain='QQ')]
"""
if auto and f.rep.dom.has_Ring:
f = f.to_field()
if hasattr(f.rep, 'sturm'):
result = f.rep.sturm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sturm')
return list(map(f.per, result))
def gff_list(f):
"""
Computes greatest factorial factorization of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**5 + 2*x**4 - x**3 - 2*x**2
>>> Poly(f).gff_list()
[(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'gff_list'):
result = f.rep.gff_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'gff_list')
return [(f.per(g), k) for g, k in result]
def sqf_norm(f):
"""
Computes square-free norm of ``f``.
        Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x - s*a)`` and
        ``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
        where ``a`` is the generator of the algebraic extension of the
        ground domain.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x
>>> s, f, r = Poly(x**2 + 1, x, extension=[sqrt(3)]).sqf_norm()
>>> s
1
>>> f
Poly(x**2 - 2*sqrt(3)*x + 4, x, domain='QQ<sqrt(3)>')
>>> r
Poly(x**4 - 4*x**2 + 16, x, domain='QQ')
"""
if hasattr(f.rep, 'sqf_norm'):
s, g, r = f.rep.sqf_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_norm')
return s, f.per(g), f.per(r)
def sqf_part(f):
"""
Computes square-free part of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 3*x - 2, x).sqf_part()
Poly(x**2 - x - 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqf_part'):
result = f.rep.sqf_part()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_part')
return f.per(result)
def sqf_list(f, all=False):
"""
Returns a list of square-free factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16
>>> Poly(f).sqf_list()
(2, [(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
>>> Poly(f).sqf_list(all=True)
(2, [(Poly(1, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
"""
if hasattr(f.rep, 'sqf_list'):
coeff, factors = f.rep.sqf_list(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list')
return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def sqf_list_include(f, all=False):
"""
Returns a list of square-free factors of ``f``.
Examples
========
>>> from sympy import Poly, expand
>>> from sympy.abc import x
>>> f = expand(2*(x + 1)**3*x**4)
>>> f
2*x**7 + 6*x**6 + 6*x**5 + 2*x**4
>>> Poly(f).sqf_list_include()
[(Poly(2, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
>>> Poly(f).sqf_list_include(all=True)
[(Poly(2, x, domain='ZZ'), 1),
(Poly(1, x, domain='ZZ'), 2),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'sqf_list_include'):
factors = f.rep.sqf_list_include(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list_include')
return [(f.per(g), k) for g, k in factors]
def factor_list(f):
"""
Returns a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list()
(2, [(Poly(x + y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)])
"""
if hasattr(f.rep, 'factor_list'):
try:
coeff, factors = f.rep.factor_list()
except DomainError:
return S.One, [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list')
return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def factor_list_include(f):
"""
Returns a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list_include()
[(Poly(2*x + 2*y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)]
"""
if hasattr(f.rep, 'factor_list_include'):
try:
factors = f.rep.factor_list_include()
except DomainError:
return [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list_include')
return [(f.per(g), k) for g, k in factors]
def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
For real roots the Vincent-Akritas-Strzebonski (VAS) continued fractions method is used.
        References
        ==========
        1. Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study of Two Real Root
        Isolation Methods. Nonlinear Analysis: Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
2. Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas: Improving the
Performance of the Continued Fractions Method Using new Bounds of Positive Roots. Nonlinear
Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).intervals()
[((-2, -1), 1), ((1, 2), 1)]
>>> Poly(x**2 - 3, x).intervals(eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = QQ.convert(inf)
if sup is not None:
sup = QQ.convert(sup)
if hasattr(f.rep, 'intervals'):
result = f.rep.intervals(
all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else: # pragma: no cover
raise OperationNotSupported(f, 'intervals')
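        # With ``sqf=True`` bare intervals are returned; otherwise each
        # interval is paired with the multiplicity of the corresponding root.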
if sqf:
def _real(interval):
s, t = interval
return (QQ.to_sympy(s), QQ.to_sympy(t))
if not all:
return list(map(_real, result))
def _complex(rectangle):
(u, v), (s, t) = rectangle
return (QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t))
real_part, complex_part = result
return list(map(_real, real_part)), list(map(_complex, complex_part))
else:
def _real(interval):
(s, t), k = interval
return ((QQ.to_sympy(s), QQ.to_sympy(t)), k)
if not all:
return list(map(_real, result))
def _complex(rectangle):
((u, v), (s, t)), k = rectangle
return ((QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t)), k)
real_part, complex_part = result
return list(map(_real, real_part)), list(map(_complex, complex_part))
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).refine_root(1, 2, eps=1e-2)
(19/11, 26/15)
"""
if check_sqf and not f.is_sqf:
raise PolynomialError("only square-free polynomials supported")
s, t = QQ.convert(s), QQ.convert(t)
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if steps is not None:
steps = int(steps)
elif eps is None:
steps = 1
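        # If neither ``eps`` nor ``steps`` is given, perform exactly one
        # refinement step.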
if hasattr(f.rep, 'refine_root'):
S, T = f.rep.refine_root(s, t, eps=eps, steps=steps, fast=fast)
else: # pragma: no cover
raise OperationNotSupported(f, 'refine_root')
return QQ.to_sympy(S), QQ.to_sympy(T)
def count_roots(f, inf=None, sup=None):
"""
        Return the number of roots of ``f`` in the ``[inf, sup]`` interval.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**4 - 4, x).count_roots(-3, 3)
2
>>> Poly(x**4 - 4, x).count_roots(0, 1 + 3*I)
1
"""
inf_real, sup_real = True, True
if inf is not None:
inf = sympify(inf)
if inf is S.NegativeInfinity:
inf = None
else:
re, im = inf.as_real_imag()
if not im:
inf = QQ.convert(inf)
else:
inf, inf_real = list(map(QQ.convert, (re, im))), False
if sup is not None:
sup = sympify(sup)
if sup is S.Infinity:
sup = None
else:
re, im = sup.as_real_imag()
if not im:
sup = QQ.convert(sup)
else:
sup, sup_real = list(map(QQ.convert, (re, im))), False
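        # Real endpoints (or no endpoints) mean counting real roots on an
        # interval; a complex endpoint switches to counting roots inside the
        # rectangle spanned by ``inf`` and ``sup``.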
if inf_real and sup_real:
if hasattr(f.rep, 'count_real_roots'):
count = f.rep.count_real_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_real_roots')
else:
if inf_real and inf is not None:
inf = (inf, QQ.zero)
if sup_real and sup is not None:
sup = (sup, QQ.zero)
if hasattr(f.rep, 'count_complex_roots'):
count = f.rep.count_complex_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_complex_roots')
return Integer(count)
def root(f, index, radicals=True):
"""
Get an indexed root of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
>>> f.root(0)
-1/2
>>> f.root(1)
2
>>> f.root(2)
2
>>> f.root(3)
Traceback (most recent call last):
...
IndexError: root index out of [-3, 2] range, got 3
>>> Poly(x**5 + x + 1).root(0)
RootOf(x**3 - x**2 + 1, 0)
"""
return sympy.polys.rootoftools.RootOf(f, index, radicals=radicals)
def real_roots(f, multiple=True, radicals=True):
"""
Return a list of real roots with multiplicities.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).real_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).real_roots()
[RootOf(x**3 + x + 1, 0)]
"""
reals = sympy.polys.rootoftools.RootOf.real_roots(f, radicals=radicals)
if multiple:
return reals
else:
return group(reals, multiple=False)
def all_roots(f, multiple=True, radicals=True):
"""
Return a list of real and complex roots with multiplicities.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).all_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).all_roots()
[RootOf(x**3 + x + 1, 0),
RootOf(x**3 + x + 1, 1),
RootOf(x**3 + x + 1, 2)]
"""
roots = sympy.polys.rootoftools.RootOf.all_roots(f, radicals=radicals)
if multiple:
return roots
else:
return group(roots, multiple=False)
def nroots(f, n=15, maxsteps=50, cleanup=True):
"""
Compute numerical approximations of roots of ``f``.
Parameters
==========
n ... the number of digits to calculate
maxsteps ... the maximum number of iterations to do
        If the accuracy ``n`` cannot be reached in ``maxsteps`` iterations,
        an exception is raised; rerun with a larger ``maxsteps`` value.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3).nroots(n=15)
[-1.73205080756888, 1.73205080756888]
>>> Poly(x**2 - 3).nroots(n=30)
[-1.73205080756887729352744634151, 1.73205080756887729352744634151]
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute numerical roots of %s" % f)
if f.degree() <= 0:
return []
# For integer and rational coefficients, convert them to integers only
# (for accuracy). Otherwise just try to convert the coefficients to
# mpmath.mpc and raise an exception if the conversion fails.
if f.rep.dom is ZZ:
coeffs = [int(coeff) for coeff in f.all_coeffs()]
elif f.rep.dom is QQ:
denoms = [coeff.q for coeff in f.all_coeffs()]
from sympy.core.numbers import ilcm
fac = ilcm(*denoms)
coeffs = [int(coeff*fac) for coeff in f.all_coeffs()]
else:
coeffs = [coeff.evalf(n=n).as_real_imag()
for coeff in f.all_coeffs()]
try:
coeffs = [sympy.mpmath.mpc(*coeff) for coeff in coeffs]
except TypeError:
raise DomainError("Numerical domain expected, got %s" % \
f.rep.dom)
dps = sympy.mpmath.mp.dps
sympy.mpmath.mp.dps = n
try:
# We need to add extra precision to guard against losing accuracy.
# 10 times the degree of the polynomial seems to work well.
roots = sympy.mpmath.polyroots(coeffs, maxsteps=maxsteps,
cleanup=cleanup, error=False, extraprec=f.degree()*10)
# Mpmath puts real roots first, then complex ones (as does all_roots)
# so we make sure this convention holds here, too.
roots = list(map(sympify,
sorted(roots, key=lambda r: (1 if r.imag else 0, r.real, r.imag))))
except NoConvergence:
raise NoConvergence(
'convergence to root failed; try n < %s or maxsteps > %s' % (
n, maxsteps))
finally:
sympy.mpmath.mp.dps = dps
return roots
def ground_roots(f):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**6 - 4*x**4 + 4*x**3 - x**2).ground_roots()
{0: 2, 1: 2}
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute ground roots of %s" % f)
roots = {}
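        # Only linear factors a*x + b contribute roots in the ground domain,
        # namely -b/a, with the factor's exponent as the multiplicity.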
for factor, k in f.factor_list()[1]:
if factor.is_linear:
a, b = factor.all_coeffs()
roots[-b/a] = k
return roots
def nth_power_roots_poly(f, n):
"""
Construct a polynomial with n-th powers of roots of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**4 - x**2 + 1)
>>> f.nth_power_roots_poly(2)
Poly(x**4 - 2*x**3 + 3*x**2 - 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(3)
Poly(x**4 + 2*x**2 + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(4)
Poly(x**4 + 2*x**3 + 3*x**2 + 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(12)
Poly(x**4 - 4*x**3 + 6*x**2 - 4*x + 1, x, domain='ZZ')
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"must be a univariate polynomial")
N = sympify(n)
if N.is_Integer and N >= 1:
n = int(N)
else:
raise ValueError("'n' must an integer and n >= 1, got %s" % n)
x = f.gen
t = Dummy('t')
r = f.resultant(f.__class__.from_expr(x**n - t, x, t))
return r.replace(t, x)
def cancel(f, g, include=False):
"""
Cancel common factors in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x))
(1, Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x), include=True)
(Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
if hasattr(F, 'cancel'):
result = F.cancel(G, include=include)
else: # pragma: no cover
raise OperationNotSupported(f, 'cancel')
if not include:
if dom.has_assoc_Ring:
dom = dom.get_ring()
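            # ``result`` unpacks as (cp, cq, p, q): the ground coefficients
            # pulled out of f and g and the cancelled polynomial parts.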
cp, cq, p, q = result
cp = dom.to_sympy(cp)
cq = dom.to_sympy(cq)
return cp/cq, per(p), per(q)
else:
return tuple(map(per, result))
@property
def is_zero(f):
"""
Returns ``True`` if ``f`` is a zero polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_zero
True
>>> Poly(1, x).is_zero
False
"""
return f.rep.is_zero
@property
def is_one(f):
"""
Returns ``True`` if ``f`` is a unit polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_one
False
>>> Poly(1, x).is_one
True
"""
return f.rep.is_one
@property
def is_sqf(f):
"""
Returns ``True`` if ``f`` is a square-free polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).is_sqf
False
>>> Poly(x**2 - 1, x).is_sqf
True
"""
return f.rep.is_sqf
@property
def is_monic(f):
"""
Returns ``True`` if the leading coefficient of ``f`` is one.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 2, x).is_monic
True
>>> Poly(2*x + 2, x).is_monic
False
"""
return f.rep.is_monic
@property
def is_primitive(f):
"""
Returns ``True`` if GCD of the coefficients of ``f`` is one.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 6*x + 12, x).is_primitive
False
>>> Poly(x**2 + 3*x + 6, x).is_primitive
True
"""
return f.rep.is_primitive
@property
def is_ground(f):
"""
Returns ``True`` if ``f`` is an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x, x).is_ground
False
>>> Poly(2, x).is_ground
True
>>> Poly(y, x).is_ground
True
"""
return f.rep.is_ground
@property
def is_linear(f):
"""
Returns ``True`` if ``f`` is linear in all its variables.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x + y + 2, x, y).is_linear
True
>>> Poly(x*y + 2, x, y).is_linear
False
"""
return f.rep.is_linear
@property
def is_quadratic(f):
"""
Returns ``True`` if ``f`` is quadratic in all its variables.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x*y + 2, x, y).is_quadratic
True
>>> Poly(x*y**2 + 2, x, y).is_quadratic
False
"""
return f.rep.is_quadratic
@property
def is_monomial(f):
"""
Returns ``True`` if ``f`` is zero or has only one term.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(3*x**2, x).is_monomial
True
>>> Poly(3*x**2 + 1, x).is_monomial
False
"""
return f.rep.is_monomial
@property
def is_homogeneous(f):
"""
Returns ``True`` if ``f`` is a homogeneous polynomial.
        A homogeneous polynomial is a polynomial all of whose monomials with
        non-zero coefficients have the same total degree. If you want not
only to check if a polynomial is homogeneous but also compute its
homogeneous order, then use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y, x, y).is_homogeneous
True
>>> Poly(x**3 + x*y, x, y).is_homogeneous
False
"""
return f.rep.is_homogeneous
@property
def is_irreducible(f):
"""
Returns ``True`` if ``f`` has no factors over its domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x + 1, x, modulus=2).is_irreducible
True
>>> Poly(x**2 + 1, x, modulus=2).is_irreducible
False
"""
return f.rep.is_irreducible
@property
def is_univariate(f):
"""
Returns ``True`` if ``f`` is a univariate polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_univariate
True
>>> Poly(x*y**2 + x*y + 1, x, y).is_univariate
False
>>> Poly(x*y**2 + x*y + 1, x).is_univariate
True
>>> Poly(x**2 + x + 1, x, y).is_univariate
False
"""
return len(f.gens) == 1
@property
def is_multivariate(f):
"""
Returns ``True`` if ``f`` is a multivariate polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_multivariate
False
>>> Poly(x*y**2 + x*y + 1, x, y).is_multivariate
True
>>> Poly(x*y**2 + x*y + 1, x).is_multivariate
False
>>> Poly(x**2 + x + 1, x, y).is_multivariate
True
"""
return len(f.gens) != 1
@property
def is_cyclotomic(f):
"""
        Returns ``True`` if ``f`` is a cyclotomic polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> Poly(f).is_cyclotomic
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> Poly(g).is_cyclotomic
True
"""
return f.rep.is_cyclotomic
def __abs__(f):
return f.abs()
def __neg__(f):
return f.neg()
@_sympifyit('g', NotImplemented)
def __add__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr() + g
return f.add(g)
@_sympifyit('g', NotImplemented)
def __radd__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g + f.as_expr()
return g.add(f)
@_sympifyit('g', NotImplemented)
def __sub__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr() - g
return f.sub(g)
@_sympifyit('g', NotImplemented)
def __rsub__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g - f.as_expr()
return g.sub(f)
@_sympifyit('g', NotImplemented)
def __mul__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr()*g
return f.mul(g)
@_sympifyit('g', NotImplemented)
def __rmul__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g*f.as_expr()
return g.mul(f)
@_sympifyit('n', NotImplemented)
def __pow__(f, n):
if n.is_Integer and n >= 0:
return f.pow(n)
else:
return f.as_expr()**n
@_sympifyit('g', NotImplemented)
def __divmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.div(g)
@_sympifyit('g', NotImplemented)
def __rdivmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.div(f)
@_sympifyit('g', NotImplemented)
def __mod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.rem(g)
@_sympifyit('g', NotImplemented)
def __rmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.rem(f)
@_sympifyit('g', NotImplemented)
def __floordiv__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.quo(g)
@_sympifyit('g', NotImplemented)
def __rfloordiv__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.quo(f)
@_sympifyit('g', NotImplemented)
def __div__(f, g):
return f.as_expr()/g.as_expr()
@_sympifyit('g', NotImplemented)
def __rdiv__(f, g):
return g.as_expr()/f.as_expr()
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('g', NotImplemented)
def __eq__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if f.gens != g.gens:
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
@_sympifyit('g', NotImplemented)
def __ne__(f, g):
return not f.__eq__(g)
def __nonzero__(f):
return not f.is_zero
__bool__ = __nonzero__
def eq(f, g, strict=False):
if not strict:
return f.__eq__(g)
else:
return f._strict_eq(sympify(g))
def ne(f, g, strict=False):
return not f.eq(g, strict=strict)
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.gens == g.gens and f.rep.eq(g.rep, strict=True)
@public
class PurePoly(Poly):
"""Class for representing pure polynomials. """
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep,)
def __hash__(self):
return super(PurePoly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial.
Examples
========
>>> from sympy import PurePoly
>>> from sympy.abc import x, y
>>> PurePoly(x**2 + 1).free_symbols
set()
>>> PurePoly(x**2 + y).free_symbols
set()
>>> PurePoly(x**2 + y, x).free_symbols
set([y])
"""
return self.free_symbols_in_domain
@_sympifyit('g', NotImplemented)
def __eq__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if len(f.gens) != len(g.gens):
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.rep.eq(g.rep, strict=True)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
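        # Unlike Poly, PurePoly unifies positionally: generator names are
        # ignored and only the number of generators has to agree.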
if len(f.gens) != len(g.gens):
raise UnificationFailed("can't unify %s with %s" % (f, g))
if not (isinstance(f.rep, DMP) and isinstance(g.rep, DMP)):
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
gens = f.gens
dom = f.rep.dom.unify(g.rep.dom, gens)
F = f.rep.convert(dom)
G = g.rep.convert(dom)
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
@public
def poly_from_expr(expr, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return _poly_from_expr(expr, opt)
def _poly_from_expr(expr, opt):
"""Construct a polynomial from an expression. """
orig, expr = expr, sympify(expr)
if not isinstance(expr, Basic):
raise PolificationFailed(opt, orig, expr)
elif expr.is_Poly:
poly = expr.__class__._from_poly(expr, opt)
opt.gens = poly.gens
opt.domain = poly.domain
if opt.polys is None:
opt.polys = True
return poly, opt
elif opt.expand:
expr = expr.expand()
try:
rep, opt = _dict_from_expr(expr, opt)
except GeneratorsNeeded:
raise PolificationFailed(opt, orig, expr)
monoms, coeffs = list(zip(*list(rep.items())))
domain = opt.domain
if domain is None:
opt.domain, coeffs = construct_domain(coeffs, opt=opt)
else:
coeffs = list(map(domain.from_sympy, coeffs))
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
if opt.polys is None:
opt.polys = False
return poly, opt
@public
def parallel_poly_from_expr(exprs, *gens, **args):
"""Construct polynomials from expressions. """
opt = options.build_options(gens, args)
return _parallel_poly_from_expr(exprs, opt)
def _parallel_poly_from_expr(exprs, opt):
"""Construct polynomials from expressions. """
if len(exprs) == 2:
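        # Fast path: a pair of Poly instances can be unified directly,
        # without rebuilding them from expressions.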
f, g = exprs
if isinstance(f, Poly) and isinstance(g, Poly):
f = f.__class__._from_poly(f, opt)
g = g.__class__._from_poly(g, opt)
f, g = f.unify(g)
opt.gens = f.gens
opt.domain = f.domain
if opt.polys is None:
opt.polys = True
return [f, g], opt
origs, exprs = list(exprs), []
_exprs, _polys = [], []
failed = False
for i, expr in enumerate(origs):
expr = sympify(expr)
if isinstance(expr, Basic):
if expr.is_Poly:
_polys.append(i)
else:
_exprs.append(i)
if opt.expand:
expr = expr.expand()
else:
failed = True
exprs.append(expr)
if failed:
raise PolificationFailed(opt, origs, exprs, True)
if _polys:
# XXX: this is a temporary solution
for i in _polys:
exprs[i] = exprs[i].as_expr()
try:
reps, opt = _parallel_dict_from_expr(exprs, opt)
except GeneratorsNeeded:
raise PolificationFailed(opt, origs, exprs, True)
for k in opt.gens:
if isinstance(k, Piecewise):
raise PolynomialError("Piecewise generators do not make sense")
coeffs_list, lengths = [], []
all_monoms = []
all_coeffs = []
for rep in reps:
monoms, coeffs = list(zip(*list(rep.items())))
coeffs_list.extend(coeffs)
all_monoms.append(monoms)
lengths.append(len(coeffs))
domain = opt.domain
if domain is None:
opt.domain, coeffs_list = construct_domain(coeffs_list, opt=opt)
else:
coeffs_list = list(map(domain.from_sympy, coeffs_list))
for k in lengths:
all_coeffs.append(coeffs_list[:k])
coeffs_list = coeffs_list[k:]
polys = []
for monoms, coeffs in zip(all_monoms, all_coeffs):
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
polys.append(poly)
if opt.polys is None:
opt.polys = bool(_polys)
return polys, opt
def _update_args(args, key, value):
"""Add a new ``(key, value)`` pair to arguments ``dict``. """
args = dict(args)
if key not in args:
args[key] = value
return args
@public
def degree(f, *gens, **args):
"""
Return the degree of ``f`` in the given variable.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import degree
>>> from sympy.abc import x, y
>>> degree(x**2 + y*x + 1, gen=x)
2
>>> degree(x**2 + y*x + 1, gen=y)
1
>>> degree(0, x)
-oo
"""
options.allowed_flags(args, ['gen', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('degree', 1, exc)
return sympify(F.degree(opt.gen))
@public
def degree_list(f, *gens, **args):
"""
Return a list of degrees of ``f`` in all variables.
Examples
========
>>> from sympy import degree_list
>>> from sympy.abc import x, y
>>> degree_list(x**2 + y*x + 1)
(2, 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('degree_list', 1, exc)
degrees = F.degree_list()
return tuple(map(Integer, degrees))
@public
def LC(f, *gens, **args):
"""
Return the leading coefficient of ``f``.
Examples
========
>>> from sympy import LC
>>> from sympy.abc import x, y
>>> LC(4*x**2 + 2*x*y**2 + x*y + 3*y)
4
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LC', 1, exc)
return F.LC(order=opt.order)
@public
def LM(f, *gens, **args):
"""
Return the leading monomial of ``f``.
Examples
========
>>> from sympy import LM
>>> from sympy.abc import x, y
>>> LM(4*x**2 + 2*x*y**2 + x*y + 3*y)
x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LM', 1, exc)
monom = F.LM(order=opt.order)
return monom.as_expr()
@public
def LT(f, *gens, **args):
"""
Return the leading term of ``f``.
Examples
========
>>> from sympy import LT
>>> from sympy.abc import x, y
>>> LT(4*x**2 + 2*x*y**2 + x*y + 3*y)
4*x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LT', 1, exc)
monom, coeff = F.LT(order=opt.order)
return coeff*monom.as_expr()
@public
def pdiv(f, g, *gens, **args):
"""
Compute polynomial pseudo-division of ``f`` and ``g``.
Examples
========
>>> from sympy import pdiv
>>> from sympy.abc import x
>>> pdiv(x**2 + 1, 2*x - 4)
(2*x + 4, 20)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pdiv', 2, exc)
q, r = F.pdiv(G)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def prem(f, g, *gens, **args):
"""
Compute polynomial pseudo-remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import prem
>>> from sympy.abc import x
>>> prem(x**2 + 1, 2*x - 4)
20
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('prem', 2, exc)
r = F.prem(G)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def pquo(f, g, *gens, **args):
"""
Compute polynomial pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pquo
>>> from sympy.abc import x
>>> pquo(x**2 + 1, 2*x - 4)
2*x + 4
>>> pquo(x**2 - 1, 2*x - 1)
2*x + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pquo', 2, exc)
try:
q = F.pquo(G)
except ExactQuotientFailed:
raise ExactQuotientFailed(f, g)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def pexquo(f, g, *gens, **args):
"""
Compute polynomial exact pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pexquo
>>> from sympy.abc import x
>>> pexquo(x**2 - 1, 2*x - 2)
2*x + 2
>>> pexquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pexquo', 2, exc)
q = F.pexquo(G)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def div(f, g, *gens, **args):
"""
Compute polynomial division of ``f`` and ``g``.
Examples
========
>>> from sympy import div, ZZ, QQ
>>> from sympy.abc import x
>>> div(x**2 + 1, 2*x - 4, domain=ZZ)
(0, x**2 + 1)
>>> div(x**2 + 1, 2*x - 4, domain=QQ)
(x/2 + 1, 5)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('div', 2, exc)
q, r = F.div(G, auto=opt.auto)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def rem(f, g, *gens, **args):
"""
Compute polynomial remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import rem, ZZ, QQ
>>> from sympy.abc import x
>>> rem(x**2 + 1, 2*x - 4, domain=ZZ)
x**2 + 1
>>> rem(x**2 + 1, 2*x - 4, domain=QQ)
5
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('rem', 2, exc)
r = F.rem(G, auto=opt.auto)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def quo(f, g, *gens, **args):
"""
Compute polynomial quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import quo
>>> from sympy.abc import x
>>> quo(x**2 + 1, 2*x - 4)
x/2 + 1
>>> quo(x**2 - 1, x - 1)
x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('quo', 2, exc)
q = F.quo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def exquo(f, g, *gens, **args):
"""
Compute polynomial exact quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import exquo
>>> from sympy.abc import x
>>> exquo(x**2 - 1, x - 1)
x + 1
>>> exquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('exquo', 2, exc)
q = F.exquo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def half_gcdex(f, g, *gens, **args):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import half_gcdex
>>> from sympy.abc import x
>>> half_gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(-x/5 + 3/5, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, h = domain.half_gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('half_gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(h)
s, h = F.half_gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), h.as_expr()
else:
return s, h
@public
def gcdex(f, g, *gens, **args):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import gcdex
>>> from sympy.abc import x
>>> gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(-x/5 + 3/5, x**2/5 - 6*x/5 + 2, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, t, h = domain.gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(t), domain.to_sympy(h)
s, t, h = F.gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), t.as_expr(), h.as_expr()
else:
return s, t, h
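# Illustrative sketch (not part of the original module): the triple returned
# by gcdex() satisfies the Bezout identity s*f + t*g = h.
#   >>> from sympy import gcdex, expand
#   >>> from sympy.abc import x
#   >>> f, g = x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4
#   >>> s, t, h = gcdex(f, g)
#   >>> expand(s*f + t*g) == h
#   True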
@public
def invert(f, g, *gens, **args):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import invert
>>> from sympy.abc import x
>>> invert(x**2 - 1, 2*x - 1)
-4/3
>>> invert(x**2 - 1, x - 1)
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.invert(a, b))
except NotImplementedError:
raise ComputationFailed('invert', 2, exc)
h = F.invert(G, auto=opt.auto)
if not opt.polys:
return h.as_expr()
else:
return h
@public
def subresultants(f, g, *gens, **args):
"""
Compute subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import subresultants
>>> from sympy.abc import x
>>> subresultants(x**2 + 1, x**2 - 1)
[x**2 + 1, x**2 - 1, -2]
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('subresultants', 2, exc)
result = F.subresultants(G)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def resultant(f, g, *gens, **args):
"""
Compute resultant of ``f`` and ``g``.
Examples
========
>>> from sympy import resultant
>>> from sympy.abc import x
>>> resultant(x**2 + 1, x**2 - 1)
4
"""
includePRS = args.pop('includePRS', False)
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('resultant', 2, exc)
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
if not opt.polys:
if includePRS:
return result.as_expr(), [r.as_expr() for r in R]
return result.as_expr()
else:
if includePRS:
return result, R
return result
@public
def discriminant(f, *gens, **args):
"""
Compute discriminant of ``f``.
Examples
========
>>> from sympy import discriminant
>>> from sympy.abc import x
>>> discriminant(x**2 + 2*x + 3)
-8
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('discriminant', 1, exc)
result = F.discriminant()
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cofactors(f, g, *gens, **args):
"""
Compute GCD and cofactors of ``f`` and ``g``.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import cofactors
>>> from sympy.abc import x
>>> cofactors(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
h, cff, cfg = domain.cofactors(a, b)
except NotImplementedError:
raise ComputationFailed('cofactors', 2, exc)
else:
return domain.to_sympy(h), domain.to_sympy(cff), domain.to_sympy(cfg)
h, cff, cfg = F.cofactors(G)
if not opt.polys:
return h.as_expr(), cff.as_expr(), cfg.as_expr()
else:
return h, cff, cfg
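# Illustrative sketch (not part of the original module): multiplying the GCD
# by each cofactor reproduces the corresponding input polynomial.
#   >>> from sympy import cofactors, expand
#   >>> from sympy.abc import x
#   >>> h, cff, cfg = cofactors(x**2 - 1, x**2 - 3*x + 2)
#   >>> expand(h*cff), expand(h*cfg)
#   (x**2 - 1, x**2 - 3*x + 2)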
@public
def gcd_list(seq, *gens, **args):
"""
Compute GCD of a list of polynomials.
Examples
========
>>> from sympy import gcd_list
>>> from sympy.abc import x
>>> gcd_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x - 1
"""
seq = sympify(seq)
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.zero
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.gcd(result, number)
if domain.is_one(result):
break
return domain.to_sympy(result)
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('gcd_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.Zero
else:
return Poly(0, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.gcd(poly)
if result.is_one:
break
if not opt.polys:
return result.as_expr()
else:
return result
@public
def gcd(f, g=None, *gens, **args):
"""
Compute GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import gcd
>>> from sympy.abc import x
>>> gcd(x**2 - 1, x**2 - 3*x + 2)
x - 1
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return gcd_list(f, *gens, **args)
elif g is None:
raise TypeError("gcd() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.gcd(a, b))
except NotImplementedError:
raise ComputationFailed('gcd', 2, exc)
result = F.gcd(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm_list(seq, *gens, **args):
"""
Compute LCM of a list of polynomials.
Examples
========
>>> from sympy import lcm_list
>>> from sympy.abc import x
>>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x**5 - x**4 - 2*x**3 - x**2 + x + 2
"""
seq = sympify(seq)
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.one
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.lcm(result, number)
return domain.to_sympy(result)
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('lcm_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.One
else:
return Poly(1, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.lcm(poly)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm(f, g=None, *gens, **args):
"""
Compute LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import lcm
>>> from sympy.abc import x
>>> lcm(x**2 - 1, x**2 - 3*x + 2)
x**3 - 2*x**2 - x + 2
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return lcm_list(f, *gens, **args)
elif g is None:
raise TypeError("lcm() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.lcm(a, b))
except NotImplementedError:
raise ComputationFailed('lcm', 2, exc)
result = F.lcm(G)
if not opt.polys:
return result.as_expr()
else:
return result
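# Illustrative sketch (not part of the original module): for these inputs the
# product of gcd() and lcm() equals the product of the polynomials themselves.
#   >>> from sympy import gcd, lcm, expand
#   >>> from sympy.abc import x
#   >>> f, g = x**2 - 1, x**2 - 3*x + 2
#   >>> expand(gcd(f, g)*lcm(f, g)) == expand(f*g)
#   True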
@public
def terms_gcd(f, *gens, **args):
"""
Remove GCD of terms from ``f``.
If the ``deep`` flag is True, then the arguments of ``f`` will have
terms_gcd applied to them.
If a fraction is factored out of ``f`` and ``f`` is an Add, then
an unevaluated Mul will be returned so that automatic simplification
does not redistribute it. The hint ``clear``, when set to False, can be
used to prevent such factoring when all coefficients are not fractions.
Examples
========
>>> from sympy import terms_gcd, cos
>>> from sympy.abc import x, y
>>> terms_gcd(x**6*y**2 + x**3*y, x, y)
x**3*y*(x**3*y + 1)
The default action of polys routines is to expand the expression
given to them. terms_gcd follows this behavior:
>>> terms_gcd((3+3*x)*(x+x*y))
3*x*(x*y + x + y + 1)
If this is not desired then the hint ``expand`` can be set to False.
In this case the expression will be treated as though it were comprised
of one or more terms:
>>> terms_gcd((3+3*x)*(x+x*y), expand=False)
(3*x + 3)*(x*y + x)
In order to traverse factors of a Mul or the arguments of other
functions, the ``deep`` hint can be used:
>>> terms_gcd((3 + 3*x)*(x + x*y), expand=False, deep=True)
3*x*(x + 1)*(y + 1)
>>> terms_gcd(cos(x + x*y), deep=True)
cos(x*(y + 1))
Rationals are factored out by default:
>>> terms_gcd(x + y/2)
(2*x + y)/2
Only the y-term had a coefficient that was a fraction; if one
does not want to factor out the 1/2 in cases like this, the
flag ``clear`` can be set to False:
>>> terms_gcd(x + y/2, clear=False)
x + y/2
>>> terms_gcd(x*y/2 + y**2, clear=False)
y*(x/2 + y)
The ``clear`` flag is ignored if all coefficients are fractions:
>>> terms_gcd(x/3 + y/2, clear=False)
(2*x + 3*y)/6
See Also
========
sympy.core.exprtools.gcd_terms, sympy.core.exprtools.factor_terms
"""
from sympy.core.relational import Equality
orig = sympify(f)
if not isinstance(f, Expr) or f.is_Atom:
return orig
if args.get('deep', False):
new = f.func(*[terms_gcd(a, *gens, **args) for a in f.args])
args.pop('deep')
args['expand'] = False
return terms_gcd(new, *gens, **args)
if isinstance(f, Equality):
return f
clear = args.pop('clear', True)
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
return exc.expr
J, f = F.terms_gcd()
if opt.domain.has_Ring:
if opt.domain.has_Field:
denom, f = f.clear_denoms(convert=True)
coeff, f = f.primitive()
if opt.domain.has_Field:
coeff /= denom
else:
coeff = S.One
term = Mul(*[x**j for x, j in zip(f.gens, J)])
if coeff == 1:
coeff = S.One
if term == 1:
return orig
if clear:
return _keep_coeff(coeff, term*f.as_expr())
# base the clearing on the form of the original expression, not
# the (perhaps) Mul that we have now
coeff, f = _keep_coeff(coeff, f.as_expr(), clear=False).as_coeff_Mul()
return _keep_coeff(coeff, term*f, clear=False)
@public
def trunc(f, p, *gens, **args):
"""
Reduce ``f`` modulo a constant ``p``.
Examples
========
>>> from sympy import trunc
>>> from sympy.abc import x
>>> trunc(2*x**3 + 3*x**2 + 5*x + 7, 3)
-x**3 - x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('trunc', 1, exc)
result = F.trunc(sympify(p))
if not opt.polys:
return result.as_expr()
else:
return result
@public
def monic(f, *gens, **args):
"""
Divide all coefficients of ``f`` by ``LC(f)``.
Examples
========
>>> from sympy import monic
>>> from sympy.abc import x
>>> monic(3*x**2 + 4*x + 2)
x**2 + 4*x/3 + 2/3
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('monic', 1, exc)
result = F.monic(auto=opt.auto)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def content(f, *gens, **args):
"""
Compute GCD of coefficients of ``f``.
Examples
========
>>> from sympy import content
>>> from sympy.abc import x
>>> content(6*x**2 + 8*x + 12)
2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('content', 1, exc)
return F.content()
@public
def primitive(f, *gens, **args):
"""
Compute content and the primitive form of ``f``.
Examples
========
>>> from sympy.polys.polytools import primitive
>>> from sympy.abc import x
>>> primitive(6*x**2 + 8*x + 12)
(2, 3*x**2 + 4*x + 6)
>>> eq = (2 + 2*x)*x + 2
Expansion is performed by default:
>>> primitive(eq)
(2, x**2 + x + 1)
Set ``expand`` to False to shut this off. Note that the
extraction will not be recursive; use the as_content_primitive method
for recursive, non-destructive Rational extraction.
>>> primitive(eq, expand=False)
(1, x*(2*x + 2) + 2)
>>> eq.as_content_primitive()
(2, x*(x + 1) + 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('primitive', 1, exc)
cont, result = F.primitive()
if not opt.polys:
return cont, result.as_expr()
else:
return cont, result
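# Illustrative sketch (not part of the original module): the content times the
# primitive part reproduces the original polynomial.
#   >>> from sympy import primitive, expand
#   >>> from sympy.abc import x
#   >>> c, p = primitive(6*x**2 + 8*x + 12)
#   >>> expand(c*p) == 6*x**2 + 8*x + 12
#   True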
@public
def compose(f, g, *gens, **args):
"""
Compute functional composition ``f(g)``.
Examples
========
>>> from sympy import compose
>>> from sympy.abc import x
>>> compose(x**2 + x, x - 1)
x**2 - x
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('compose', 2, exc)
result = F.compose(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def decompose(f, *gens, **args):
"""
Compute functional decomposition of ``f``.
Examples
========
>>> from sympy import decompose
>>> from sympy.abc import x
>>> decompose(x**4 + 2*x**3 - x - 1)
[x**2 - x - 1, x**2 + x]
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('decompose', 1, exc)
result = F.decompose()
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
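# Illustrative sketch (not part of the original module): composing the parts
# returned by decompose() gives back the original polynomial.
#   >>> from sympy import compose, decompose, expand
#   >>> from sympy.abc import x
#   >>> f = x**4 + 2*x**3 - x - 1
#   >>> g, h = decompose(f)
#   >>> expand(compose(g, h)) == f
#   True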
@public
def sturm(f, *gens, **args):
"""
Compute Sturm sequence of ``f``.
Examples
========
>>> from sympy import sturm
>>> from sympy.abc import x
>>> sturm(x**3 - 2*x**2 + x - 3)
[x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2*x/9 + 25/9, -2079/4]
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sturm', 1, exc)
result = F.sturm(auto=opt.auto)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def gff_list(f, *gens, **args):
"""
Compute a list of greatest factorial factors of ``f``.
Examples
========
>>> from sympy import gff_list, ff
>>> from sympy.abc import x
>>> f = x**5 + 2*x**4 - x**3 - 2*x**2
>>> gff_list(f)
[(x, 1), (x + 2, 4)]
>>> (ff(x, 1)*ff(x + 2, 4)).expand() == f
True
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('gff_list', 1, exc)
factors = F.gff_list()
if not opt.polys:
return [(g.as_expr(), k) for g, k in factors]
else:
return factors
@public
def gff(f, *gens, **args):
"""Compute greatest factorial factorization of ``f``. """
raise NotImplementedError('symbolic falling factorial')
@public
def sqf_norm(f, *gens, **args):
"""
Compute square-free norm of ``f``.
Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x-sa)`` and
``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
where ``a`` is the algebraic extension of the ground domain.
Examples
========
>>> from sympy import sqf_norm, sqrt
>>> from sympy.abc import x
>>> sqf_norm(x**2 + 1, extension=[sqrt(3)])
(1, x**2 - 2*sqrt(3)*x + 4, x**4 - 4*x**2 + 16)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sqf_norm', 1, exc)
s, g, r = F.sqf_norm()
if not opt.polys:
return Integer(s), g.as_expr(), r.as_expr()
else:
return Integer(s), g, r
@public
def sqf_part(f, *gens, **args):
"""
Compute square-free part of ``f``.
Examples
========
>>> from sympy import sqf_part
>>> from sympy.abc import x
>>> sqf_part(x**3 - 3*x - 2)
x**2 - x - 2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sqf_part', 1, exc)
result = F.sqf_part()
if not opt.polys:
return result.as_expr()
else:
return result
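# Note (added for clarity, not part of the original module): the square-free
# part drops multiplicities. Here x**3 - 3*x - 2 factors as (x + 1)**2*(x - 2),
# so its square-free part is (x + 1)*(x - 2) = x**2 - x - 2.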
def _sorted_factors(factors, method):
"""Sort a list of ``(expr, exp)`` pairs. """
if method == 'sqf':
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (exp, len(rep), rep)
else:
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), exp, rep)
return sorted(factors, key=key)
def _factors_product(factors):
"""Multiply a list of ``(expr, exp)`` pairs. """
return Mul(*[f.as_expr()**k for f, k in factors])
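# Note (added for clarity, not part of the original module): _factors_product
# turns a factor list such as [(Poly(x + 1), 2), (Poly(x + 2), 3)] back into
# the expression (x + 1)**2*(x + 2)**3.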
def _symbolic_factor_list(expr, opt, method):
"""Helper function for :func:`_symbolic_factor`. """
coeff, factors = S.One, []
for arg in Mul.make_args(expr):
if arg.is_Number:
coeff *= arg
continue
elif arg.is_Pow:
base, exp = arg.args
if base.is_Number:
factors.append((base, exp))
continue
else:
base, exp = arg, S.One
try:
poly, _ = _poly_from_expr(base, opt)
except PolificationFailed as exc:
factors.append((exc.expr, exp))
else:
func = getattr(poly, method + '_list')
_coeff, _factors = func()
if _coeff is not S.One:
if exp.is_Integer:
coeff *= _coeff**exp
elif _coeff.is_positive:
factors.append((_coeff, exp))
else:
_factors.append((_coeff, S.One))
if exp is S.One:
factors.extend(_factors)
elif exp.is_integer:
factors.extend([(f, k*exp) for f, k in _factors])
else:
other = []
for f, k in _factors:
if f.as_expr().is_positive:
factors.append((f, k*exp))
else:
other.append((f, k))
factors.append((_factors_product(other), exp))
return coeff, factors
def _symbolic_factor(expr, opt, method):
"""Helper function for :func:`_factor`. """
if isinstance(expr, Expr) and not expr.is_Relational:
if hasattr(expr,'_eval_factor'):
return expr._eval_factor()
coeff, factors = _symbolic_factor_list(together(expr), opt, method)
return _keep_coeff(coeff, _factors_product(factors))
elif hasattr(expr, 'args'):
return expr.func(*[_symbolic_factor(arg, opt, method) for arg in expr.args])
elif hasattr(expr, '__iter__'):
return expr.__class__([_symbolic_factor(arg, opt, method) for arg in expr])
else:
return expr
def _generic_factor_list(expr, gens, args, method):
"""Helper function for :func:`sqf_list` and :func:`factor_list`. """
options.allowed_flags(args, ['frac', 'polys'])
opt = options.build_options(gens, args)
expr = sympify(expr)
if isinstance(expr, Expr) and not expr.is_Relational:
numer, denom = together(expr).as_numer_denom()
cp, fp = _symbolic_factor_list(numer, opt, method)
cq, fq = _symbolic_factor_list(denom, opt, method)
if fq and not opt.frac:
raise PolynomialError("a polynomial expected, got %s" % expr)
_opt = opt.clone(dict(expand=True))
for factors in (fp, fq):
for i, (f, k) in enumerate(factors):
if not f.is_Poly:
f, _ = _poly_from_expr(f, _opt)
factors[i] = (f, k)
fp = _sorted_factors(fp, method)
fq = _sorted_factors(fq, method)
if not opt.polys:
fp = [(f.as_expr(), k) for f, k in fp]
fq = [(f.as_expr(), k) for f, k in fq]
coeff = cp/cq
if not opt.frac:
return coeff, fp
else:
return coeff, fp, fq
else:
raise PolynomialError("a polynomial expected, got %s" % expr)
def _generic_factor(expr, gens, args, method):
"""Helper function for :func:`sqf` and :func:`factor`. """
options.allowed_flags(args, [])
opt = options.build_options(gens, args)
return _symbolic_factor(sympify(expr), opt, method)
def to_rational_coeffs(f):
"""
try to transform a polynomial to have rational coefficients
try to find a transformation ``x = alpha*y``
``f(x) = lc*alpha**n * g(y)`` where ``g`` is a polynomial with
rational coefficients, ``lc`` the leading coefficient.
If this fails, try ``x = y + beta``
``f(x) = g(y)``
Returns ``None`` if ``g`` not found;
``(lc, alpha, None, g)`` in case of rescaling
``(None, None, beta, g)`` in case of translation
Notes
=====
Currently it transforms only polynomials without roots larger than 2.
Examples
========
>>> from sympy import sqrt, Poly, simplify
>>> from sympy.polys.polytools import to_rational_coeffs
>>> from sympy.abc import x
>>> p = Poly(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}), x, domain='EX')
>>> lc, r, _, g = to_rational_coeffs(p)
>>> lc, r
(7 + 5*sqrt(2), -2*sqrt(2) + 2)
>>> g
Poly(x**3 + x**2 - 1/4*x - 1/4, x, domain='QQ')
>>> r1 = simplify(1/r)
>>> Poly(lc*r**3*(g.as_expr()).subs({x:x*r1}), x, domain='EX') == p
True
"""
from sympy.simplify.simplify import simplify
def _try_rescale(f):
"""
try rescaling ``x -> alpha*x`` to convert f to a polynomial
with rational coefficients.
Returns ``lc, rescale_x, f`` if the rescaling is successful, where
``lc`` is the leading coefficient, ``rescale_x`` the rescaling factor
and ``f`` the rescaled polynomial; otherwise ``None`` is returned.
"""
from sympy.core.add import Add
if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
return None, f
n = f.degree()
lc = f.LC()
coeffs = f.monic().all_coeffs()[1:]
coeffs = [simplify(coeffx) for coeffx in coeffs]
if coeffs[-2] and not all(coeffx.is_rational for coeffx in coeffs):
rescale1_x = simplify(coeffs[-2]/coeffs[-1])
coeffs1 = []
for i in range(len(coeffs)):
coeffx = simplify(coeffs[i]*rescale1_x**(i + 1))
if not coeffx.is_rational:
break
coeffs1.append(coeffx)
else:
rescale_x = simplify(1/rescale1_x)
x = f.gens[0]
v = [x**n]
for i in range(1, n + 1):
v.append(coeffs1[i - 1]*x**(n - i))
f = Add(*v)
f = Poly(f)
return lc, rescale_x, f
return None
def _try_translate(f):
"""
try translating ``x -> x + alpha`` to convert f to a polynomial
with rational coefficients.
Returns ``alpha, f``; if the translating is successful,
``alpha`` is the translating factor, and ``f`` is the shifted
polynomial; else ``alpha`` is ``None``.
"""
from sympy.core.add import Add
if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
return None, f
n = f.degree()
f1 = f.monic()
coeffs = f1.all_coeffs()[1:]
c = simplify(coeffs[0])
if c and not c.is_rational:
func = Add
if c.is_Add:
args = c.args
func = c.func
else:
args = [c]
sifted = sift(args, lambda z: z.is_rational)
c1, c2 = sifted[True], sifted[False]
alpha = -func(*c2)/n
f2 = f1.shift(alpha)
return alpha, f2
return None
def _has_square_roots(p):
"""
Return True if ``f`` is a sum with square roots but no other root
"""
from sympy.core.exprtools import Factors
coeffs = p.coeffs()
has_sq = False
for y in coeffs:
for x in Add.make_args(y):
f = Factors(x).factors
r = [wx.q for wx in f.values() if wx.is_Rational and wx.q >= 2]
if not r:
continue
if min(r) == 2:
has_sq = True
if max(r) > 2:
return False
return has_sq
if f.get_domain().is_EX and _has_square_roots(f):
r = _try_rescale(f)
if r:
return r[0], r[1], None, r[2]
else:
r = _try_translate(f)
if r:
return None, None, r[0], r[1]
return None
def _torational_factor_list(p, x):
"""
helper function to factor polynomial using to_rational_coeffs
Examples
========
>>> from sympy.polys.polytools import _torational_factor_list
>>> from sympy.abc import x
>>> from sympy import sqrt, expand, Mul
>>> p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}))
>>> factors = _torational_factor_list(p, x); factors
(-2, [(-x*(1 + sqrt(2))/2 + 1, 1), (-x*(1 + sqrt(2)) - 1, 1), (-x*(1 + sqrt(2)) + 1, 1)])
>>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
True
>>> p = expand(((x**2-1)*(x-2)).subs({x:x + sqrt(2)}))
>>> factors = _torational_factor_list(p, x); factors
(1, [(x - 2 + sqrt(2), 1), (x - 1 + sqrt(2), 1), (x + 1 + sqrt(2), 1)])
>>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
True
"""
from sympy.simplify.simplify import simplify
p1 = Poly(p, x, domain='EX')
n = p1.degree()
res = to_rational_coeffs(p1)
if not res:
return None
lc, r, t, g = res
factors = factor_list(g.as_expr())
if lc:
c = simplify(factors[0]*lc*r**n)
r1 = simplify(1/r)
a = []
for z in factors[1:][0]:
a.append((simplify(z[0].subs({x: x*r1})), z[1]))
else:
c = factors[0]
a = []
for z in factors[1:][0]:
a.append((z[0].subs({x: x - t}), z[1]))
return (c, a)
@public
def sqf_list(f, *gens, **args):
"""
Compute a list of square-free factors of ``f``.
Examples
========
>>> from sympy import sqf_list
>>> from sympy.abc import x
>>> sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
(2, [(x + 1, 2), (x + 2, 3)])
"""
return _generic_factor_list(f, gens, args, method='sqf')
@public
def sqf(f, *gens, **args):
"""
Compute square-free factorization of ``f``.
Examples
========
>>> from sympy import sqf
>>> from sympy.abc import x
>>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
2*(x + 1)**2*(x + 2)**3
"""
return _generic_factor(f, gens, args, method='sqf')
@public
def factor_list(f, *gens, **args):
"""
Compute a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import factor_list
>>> from sympy.abc import x, y
>>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
(2, [(x + y, 1), (x**2 + 1, 2)])
"""
return _generic_factor_list(f, gens, args, method='factor')
@public
def factor(f, *gens, **args):
"""
Compute the factorization of expression, ``f``, into irreducibles. (To
factor an integer into primes, use ``factorint``.)
There are two modes implemented: symbolic and formal. If ``f`` is not an
instance of :class:`Poly` and generators are not specified, then the
former mode is used. Otherwise, the formal mode is used.
In symbolic mode, :func:`factor` will traverse the expression tree and
factor its components without any prior expansion, unless an instance
of :class:`Add` is encountered (in this case formal factorization is
used). This way :func:`factor` can handle large or symbolic exponents.
By default, the factorization is computed over the rationals. To factor
over another domain, e.g. an algebraic or finite field, use appropriate
options: ``extension``, ``modulus`` or ``domain``.
Examples
========
>>> from sympy import factor, sqrt
>>> from sympy.abc import x, y
>>> factor(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
2*(x + y)*(x**2 + 1)**2
>>> factor(x**2 + 1)
x**2 + 1
>>> factor(x**2 + 1, modulus=2)
(x + 1)**2
>>> factor(x**2 + 1, gaussian=True)
(x - I)*(x + I)
>>> factor(x**2 - 2, extension=sqrt(2))
(x - sqrt(2))*(x + sqrt(2))
>>> factor((x**2 - 1)/(x**2 + 4*x + 4))
(x - 1)*(x + 1)/(x + 2)**2
>>> factor((x**2 + 4*x + 4)**10000000*(x**2 + 1))
(x + 2)**20000000*(x**2 + 1)
By default, factor deals with an expression as a whole:
>>> eq = 2**(x**2 + 2*x + 1)
>>> factor(eq)
2**(x**2 + 2*x + 1)
If the ``deep`` flag is True then subexpressions will
be factored:
>>> factor(eq, deep=True)
2**((x + 1)**2)
See Also
========
sympy.ntheory.factor_.factorint
"""
f = sympify(f)
if args.pop('deep', False):
partials = {}
muladd = f.atoms(Mul, Add)
for p in muladd:
fac = factor(p, *gens, **args)
if (fac.is_Mul or fac.is_Pow) and fac != p:
partials[p] = fac
return f.xreplace(partials)
try:
return _generic_factor(f, gens, args, method='factor')
except PolynomialError as msg:
if not f.is_commutative:
from sympy.core.exprtools import factor_nc
return factor_nc(f)
else:
raise PolynomialError(msg)
@public
def intervals(F, all=False, eps=None, inf=None, sup=None, strict=False, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
Examples
========
>>> from sympy import intervals
>>> from sympy.abc import x
>>> intervals(x**2 - 3)
[((-2, -1), 1), ((1, 2), 1)]
>>> intervals(x**2 - 3, eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if not hasattr(F, '__iter__'):
try:
F = Poly(F)
except GeneratorsNeeded:
return []
return F.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else:
polys, opt = parallel_poly_from_expr(F, domain='QQ')
if len(opt.gens) > 1:
raise MultivariatePolynomialError
for i, poly in enumerate(polys):
polys[i] = poly.rep.rep
if eps is not None:
eps = opt.domain.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = opt.domain.convert(inf)
if sup is not None:
sup = opt.domain.convert(sup)
intervals = dup_isolate_real_roots_list(polys, opt.domain,
eps=eps, inf=inf, sup=sup, strict=strict, fast=fast)
result = []
for (s, t), indices in intervals:
s, t = opt.domain.to_sympy(s), opt.domain.to_sympy(t)
result.append(((s, t), indices))
return result
@public
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
Examples
========
>>> from sympy import refine_root
>>> from sympy.abc import x
>>> refine_root(x**2 - 3, 1, 2, eps=1e-2)
(19/11, 26/15)
"""
try:
F = Poly(f)
except GeneratorsNeeded:
raise PolynomialError(
"can't refine a root of %s, not a polynomial" % f)
return F.refine_root(s, t, eps=eps, steps=steps, fast=fast, check_sqf=check_sqf)
@public
def count_roots(f, inf=None, sup=None):
"""
Return the number of roots of ``f`` in ``[inf, sup]`` interval.
If one of ``inf`` or ``sup`` is complex, it will return the number of roots
in the complex rectangle with corners at ``inf`` and ``sup``.
Examples
========
>>> from sympy import count_roots, I
>>> from sympy.abc import x
>>> count_roots(x**4 - 4, -3, 3)
2
>>> count_roots(x**4 - 4, 0, 1 + 3*I)
1
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError("can't count roots of %s, not a polynomial" % f)
return F.count_roots(inf=inf, sup=sup)
@public
def real_roots(f, multiple=True):
"""
Return a list of real roots with multiplicities of ``f``.
Examples
========
>>> from sympy import real_roots
>>> from sympy.abc import x
>>> real_roots(2*x**3 - 7*x**2 + 4*x + 4)
[-1/2, 2, 2]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError(
"can't compute real roots of %s, not a polynomial" % f)
return F.real_roots(multiple=multiple)
@public
def nroots(f, n=15, maxsteps=50, cleanup=True):
"""
Compute numerical approximations of roots of ``f``.
Examples
========
>>> from sympy import nroots
>>> from sympy.abc import x
>>> nroots(x**2 - 3, n=15)
[-1.73205080756888, 1.73205080756888]
>>> nroots(x**2 - 3, n=30)
[-1.73205080756887729352744634151, 1.73205080756887729352744634151]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError(
"can't compute numerical roots of %s, not a polynomial" % f)
return F.nroots(n=n, maxsteps=maxsteps, cleanup=cleanup)
@public
def ground_roots(f, *gens, **args):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import ground_roots
>>> from sympy.abc import x
>>> ground_roots(x**6 - 4*x**4 + 4*x**3 - x**2)
{0: 2, 1: 2}
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('ground_roots', 1, exc)
return F.ground_roots()
@public
def nth_power_roots_poly(f, n, *gens, **args):
"""
Construct a polynomial with n-th powers of roots of ``f``.
Examples
========
>>> from sympy import nth_power_roots_poly, factor, roots
>>> from sympy.abc import x
>>> f = x**4 - x**2 + 1
>>> g = factor(nth_power_roots_poly(f, 2))
>>> g
(x**2 - x + 1)**2
>>> R_f = [ (r**2).expand() for r in roots(f) ]
>>> R_g = roots(g).keys()
>>> set(R_f) == set(R_g)
True
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('nth_power_roots_poly', 1, exc)
result = F.nth_power_roots_poly(n)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cancel(f, *gens, **args):
"""
Cancel common factors in a rational function ``f``.
Examples
========
>>> from sympy import cancel, sqrt, Symbol
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> cancel((2*x**2 - 2)/(x**2 - 2*x + 1))
(2*x + 2)/(x - 1)
>>> cancel((sqrt(3) + sqrt(15)*A)/(sqrt(2) + sqrt(10)*A))
sqrt(6)/2
"""
from sympy.core.exprtools import factor_terms
options.allowed_flags(args, ['polys'])
f = sympify(f)
if not isinstance(f, (tuple, Tuple)):
if f.is_Number or isinstance(f, Relational) or not isinstance(f, Expr):
return f
f = factor_terms(f, radical=True)
p, q = f.as_numer_denom()
elif len(f) == 2:
p, q = f
elif isinstance(f, Tuple):
return factor_terms(f)
else:
raise ValueError('unexpected argument: %s' % f)
try:
(F, G), opt = parallel_poly_from_expr((p, q), *gens, **args)
except PolificationFailed:
if not isinstance(f, (tuple, Tuple)):
return f
else:
return S.One, p, q
except PolynomialError as msg:
if f.is_commutative and not f.has(Piecewise):
raise PolynomialError(msg)
# Handling of noncommutative and/or piecewise expressions
if f.is_Add or f.is_Mul:
sifted = sift(f.args, lambda x: x.is_commutative and not x.has(Piecewise))
c, nc = sifted[True], sifted[False]
nc = [cancel(i) for i in nc]
return f.func(cancel(f.func._from_args(c)), *nc)
else:
reps = []
pot = preorder_traversal(f)
next(pot)
for e in pot:
# XXX: This should really skip anything that's not Expr.
if isinstance(e, (tuple, Tuple, BooleanAtom)):
continue
try:
reps.append((e, cancel(e)))
pot.skip() # this was handled successfully
except NotImplementedError:
pass
return f.xreplace(dict(reps))
c, P, Q = F.cancel(G)
if not isinstance(f, (tuple, Tuple)):
return c*(P.as_expr()/Q.as_expr())
else:
if not opt.polys:
return c, P.as_expr(), Q.as_expr()
else:
return c, P, Q
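# Illustrative sketch (not part of the original module): cancel() also accepts
# a (numerator, denominator) pair and then returns the triple (c, P, Q) with
# common factors removed; the output below is what the code above is expected
# to produce for this input.
#   >>> from sympy import cancel
#   >>> from sympy.abc import x
#   >>> cancel((x**2 - 1, x - 1))
#   (1, x + 1, 1)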
@public
def reduced(f, G, *gens, **args):
"""
Reduces a polynomial ``f`` modulo a set of polynomials ``G``.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import reduced
>>> from sympy.abc import x, y
>>> reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y])
([2*x, 1], x**2 + y**2 + y)
"""
options.allowed_flags(args, ['polys', 'auto'])
try:
polys, opt = parallel_poly_from_expr([f] + list(G), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('reduced', 0, exc)
domain = opt.domain
retract = False
if opt.auto and domain.has_Ring and not domain.has_Field:
opt = opt.clone(dict(domain=domain.get_field()))
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
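# Illustrative sketch (not part of the original module): the quotients and
# remainder from reduced() recombine to the input polynomial.
#   >>> from sympy import reduced, expand
#   >>> from sympy.abc import x, y
#   >>> f = 2*x**4 + y**2 - x**2 + y**3
#   >>> G = [x**3 - x, y**3 - y]
#   >>> Q, r = reduced(f, G)
#   >>> expand(sum(q*g for q, g in zip(Q, G)) + r) == f
#   True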
@public
def groebner(F, *gens, **args):
"""
Computes the reduced Groebner basis for a set of polynomials.
Use the ``order`` argument to set the monomial ordering that will be
used to compute the basis. Allowed orders are ``lex``, ``grlex`` and
``grevlex``. If no order is specified, it defaults to ``lex``.
For more information on Groebner bases, see the references and the docstring
of `solve_poly_system()`.
Examples
========
Example taken from [1].
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> F = [x*y - 2*y, 2*y**2 - x**2]
>>> groebner(F, x, y, order='lex')
GroebnerBasis([x**2 - 2*y**2, x*y - 2*y, y**3 - 2*y], x, y,
domain='ZZ', order='lex')
>>> groebner(F, x, y, order='grlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grlex')
>>> groebner(F, x, y, order='grevlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grevlex')
By default, an improved implementation of the Buchberger algorithm is
used. Optionally, an implementation of the F5B algorithm can be used.
The algorithm can be set using ``method`` flag or with the :func:`setup`
function from :mod:`sympy.polys.polyconfig`:
>>> F = [x**2 - x - 1, (2*x - 1) * y - (x**10 - (1 - x)**10)]
>>> groebner(F, x, y, method='buchberger')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
>>> groebner(F, x, y, method='f5b')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
References
==========
1. [Buchberger01]_
2. [Cox97]_
"""
return GroebnerBasis(F, *gens, **args)
@public
def is_zero_dimensional(F, *gens, **args):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
return GroebnerBasis(F, *gens, **args).is_zero_dimensional
@public
class GroebnerBasis(Basic):
"""Represents a reduced Groebner basis. """
def __new__(cls, F, *gens, **args):
"""Compute a reduced Groebner basis for a system of polynomials. """
options.allowed_flags(args, ['polys', 'method'])
try:
polys, opt = parallel_poly_from_expr(F, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('groebner', len(F), exc)
from sympy.polys.rings import PolyRing
ring = PolyRing(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
polys[i] = ring.from_dict(poly.rep.to_dict())
G = _groebner(polys, ring, method=opt.method)
G = [Poly._from_dict(g, opt) for g in G]
return cls._new(G, opt)
@classmethod
def _new(cls, basis, options):
obj = Basic.__new__(cls)
obj._basis = tuple(basis)
obj._options = options
return obj
@property
def args(self):
return (Tuple(*self._basis), Tuple(*self._options.gens))
@property
def exprs(self):
return [poly.as_expr() for poly in self._basis]
@property
def polys(self):
return list(self._basis)
@property
def gens(self):
return self._options.gens
@property
def domain(self):
return self._options.domain
@property
def order(self):
return self._options.order
def __len__(self):
return len(self._basis)
def __iter__(self):
if self._options.polys:
return iter(self.polys)
else:
return iter(self.exprs)
def __getitem__(self, item):
if self._options.polys:
basis = self.polys
else:
basis = self.exprs
return basis[item]
def __hash__(self):
return hash((self._basis, tuple(self._options.items())))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._basis == other._basis and self._options == other._options
elif iterable(other):
return self.polys == list(other) or self.exprs == list(other)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def is_zero_dimensional(self):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
def single_var(monomial):
return sum(map(bool, monomial)) == 1
exponents = Monomial([0]*len(self.gens))
order = self._options.order
for poly in self.polys:
monomial = poly.LM(order=order)
if single_var(monomial):
exponents *= monomial
# If any element of the exponents vector is zero, then there's
# a variable for which there's no degree bound and the ideal
# generated by this Groebner basis isn't zero-dimensional.
return all(exponents)
def fglm(self, order):
"""
Convert a Groebner basis from one ordering to another.
The FGLM algorithm converts reduced Groebner bases of zero-dimensional
ideals from one ordering to another. This method is often used when it
is infeasible to compute a Groebner basis with respect to a particular
ordering directly.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import groebner
>>> F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
>>> G = groebner(F, x, y, order='grlex')
>>> list(G.fglm('lex'))
[2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
>>> list(groebner(F, x, y, order='lex'))
[2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
References
==========
J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). Efficient
Computation of Zero-dimensional Groebner Bases by Change of
Ordering
"""
opt = self._options
src_order = opt.order
dst_order = monomial_key(order)
if src_order == dst_order:
return self
if not self.is_zero_dimensional:
raise NotImplementedError("can't convert Groebner bases of ideals with positive dimension")
polys = list(self._basis)
domain = opt.domain
opt = opt.clone(dict(
domain=domain.get_field(),
order=dst_order,
))
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, src_order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
G = matrix_fglm(polys, _ring, dst_order)
G = [Poly._from_dict(dict(g), opt) for g in G]
if not domain.has_Field:
G = [g.clear_denoms(convert=True)[1] for g in G]
opt.domain = domain
return self._new(G, opt)
def reduce(self, expr, auto=True):
"""
Reduces a polynomial modulo a Groebner basis.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import groebner, expand
>>> from sympy.abc import x, y
>>> f = 2*x**4 - x**2 + y**3 + y**2
>>> G = groebner([x**3 - x, y**3 - y])
>>> G.reduce(f)
([2*x, 1], x**2 + y**2 + y)
>>> Q, r = _
>>> expand(sum(q*g for q, g in zip(Q, G)) + r)
2*x**4 - x**2 + y**3 + y**2
>>> _ == f
True
"""
poly = Poly._from_expr(expr, self._options)
polys = [poly] + list(self._basis)
opt = self._options
domain = opt.domain
retract = False
if auto and domain.has_Ring and not domain.has_Field:
opt = opt.clone(dict(domain=domain.get_field()))
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
def contains(self, poly):
"""
Check if ``poly`` belongs to the ideal generated by ``self``.
Examples
========
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> f = 2*x**3 + y**3 + 3*y
>>> G = groebner([x**2 + y**2 - 1, x*y - 2])
>>> G.contains(f)
True
>>> G.contains(f + 1)
False
"""
return self.reduce(poly)[1] == 0
@public
def poly(expr, *gens, **args):
"""
Efficiently transform an expression into a polynomial.
Examples
========
>>> from sympy import poly
>>> from sympy.abc import x
>>> poly(x*(x**2 + x - 1)**2)
Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')
"""
options.allowed_flags(args, [])
def _poly(expr, opt):
terms, poly_terms = [], []
for term in Add.make_args(expr):
factors, poly_factors = [], []
for factor in Mul.make_args(term):
if factor.is_Add:
poly_factors.append(_poly(factor, opt))
elif factor.is_Pow and factor.base.is_Add and factor.exp.is_Integer:
poly_factors.append(
_poly(factor.base, opt).pow(factor.exp))
else:
factors.append(factor)
if not poly_factors:
terms.append(term)
else:
product = poly_factors[0]
for factor in poly_factors[1:]:
product = product.mul(factor)
if factors:
factor = Mul(*factors)
if factor.is_Number:
product = product.mul(factor)
else:
product = product.mul(Poly._from_expr(factor, opt))
poly_terms.append(product)
if not poly_terms:
result = Poly._from_expr(expr, opt)
else:
result = poly_terms[0]
for term in poly_terms[1:]:
result = result.add(term)
if terms:
term = Add(*terms)
if term.is_Number:
result = result.add(term)
else:
result = result.add(Poly._from_expr(term, opt))
return result.reorder(*opt.get('gens', ()), **args)
expr = sympify(expr)
if expr.is_Poly:
return Poly(expr, *gens, **args)
if 'expand' not in args:
args['expand'] = False
opt = options.build_options(gens, args)
return _poly(expr, opt)
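# Illustrative sketch (not part of the original module): poly() is expected to
# build the same Poly as expanding the expression first, while avoiding a full
# up-front expansion of the input.
#   >>> from sympy import poly, Poly, expand
#   >>> from sympy.abc import x
#   >>> e = x*(x**2 + x - 1)**2
#   >>> poly(e) == Poly(expand(e), x)
#   True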
from sympy.functions import Piecewise
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/sympy/polys/polytools.py
|
Python
|
mit
| 171,109
|
[
"Gaussian"
] |
6b26dd59225c93802a32ac952799ed45b737bb78a53c0652bb657ded355be75b
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Game based on Maxwell's demon, a thought experiment used to teach statistical
thermodynamics. The user has to scoop particles from a chamber and guide them
to another chamber through a channel with the help of a snake controlled by a
gamepad or the keyboard. The particle imbalance between chambers creates
a pressure gradient that makes it harder to move particles to the chamber
with an excess of particles.
"""
from threading import Thread
import numpy as np
import time
import espressomd
import espressomd.shapes
from espressomd.visualization_opengl import openGLLive, KeyboardButtonEvent, KeyboardFireEvent
required_features = ["LENNARD_JONES", "WCA", "MASS",
"EXTERNAL_FORCES", "LANGEVIN_PER_PARTICLE"]
espressomd.assert_features(required_features)
print("""THE CHAMBER GAME
YOUR GOAL IS TO SCOOP ALL BLUE PARTICLES INTO THE RIGHT BOX.
GREEN/RED SPHERES CAN BE PICKED UP TO INCREASE/DECREASE
THE TEMPERATURE IN THE CHAMBER WHERE THEY ARE COLLECTED.""")
try:
import pygame
has_pygame = True
print("\nCONTROLS:"
"\nMOVE: (JOYSTICK AXIS), (KEYBOARD i/j/k/l)"
"\nACTION BUTTON: (JOYSTICK A), (KEYBOARD p)"
"\nRESTART: (JOYSTICK START), (KEYBOARD b)")
except BaseException:
has_pygame = False
print("\nCONTROLS:"
"\nMOVE: (KEYBOARD i/j/k/l)"
"\nACTION BUTTON: (KEYBOARD p)"
"\nRESTART: (KEYBOARD b)")
box = np.array([1500.0, 500.0, 150.0])
system = espressomd.System(box_l=box)
system.set_random_state_PRNG()
# PARAMETERS
# PHYSICS
temperature_snake = 0.0
gamma_snake_head = 1.0
gamma_snake_bead = 15.0
temperature_bubbles = 10000.0
temp_l = temperature_bubbles
temp_r = temperature_bubbles
temp_max = 1e5
gamma_bubbles = 0.5
temperature = 1.0
gamma = 1.0
system.time_step = 0.001
# SNAKE
snake_n = 10
snake_head_sigma = 50.0
snake_bead_sigma = 20.0
snake_length = (snake_n - 1) * snake_bead_sigma + snake_head_sigma
snake_startpos = [snake_head_sigma, box[1] - snake_head_sigma, box[2] * 0.5]
snake_head_type = 0
snake_bead_type = 1
snake_head_mass = 1000.0
snake_bead_mass = 10.0
harmonic_k = 500.0 * snake_bead_mass
# PORE
pore_length = box[0] * 0.25
pore_xl = box[0] * 0.5 - pore_length * 0.5
pore_xr = box[0] * 0.5 + pore_length * 0.5
cylinder_type = 2
cylinder_sigma = 1.0
pore_radius = snake_head_sigma * 1.3
# CONTROL
move_force = 70000.0
expl_range = 200.0
expl_force = 20000.0
# BUBBLES
bubble_type = 3
bubble_sigma = 36.0
bubble_snake_eps = 10
bubble_bubble_eps = 10000.0
bubble_mass = 50.0
bubbles_n = 180
# TEMP CHANGE PARTICLE
temp_change_radius = 25
temp_change_inc_type = 4
temp_change_dec_type = 5
dtemp = 1000.0
# VISUALIZER
zoom = 10
visualizer = openGLLive(
system,
window_size=[800, 600],
draw_axis=False,
particle_sizes=[
snake_head_sigma * 0.5,
snake_bead_sigma * 0.5,
cylinder_sigma,
bubble_sigma * 0.5,
temp_change_radius,
temp_change_radius],
particle_type_colors=[[1, 1, 0],
[1, 0, 1],
[0, 0, 1],
[0, 1, 1],
[0, 1, 0],
[1, 0, 0],
[0.5, 0, 1]],
constraint_type_colors=[[1, 1, 1]],
camera_position=[snake_startpos[0],
snake_startpos[1],
system.box_l[2] * zoom],
camera_target=snake_startpos)
# JOYPAD CONTROL
if has_pygame:
pygame.init()
pygame.joystick.init()
# CHECK FOR JOYSTICKS
if pygame.joystick.get_count() > 0:
joystick = pygame.joystick.Joystick(0)
joystick.init()
joystick_control = True
else:
joystick_control = False
# CELLSYSTEM
system.cell_system.skin = 3.0
system.cell_system.set_domain_decomposition(use_verlet_lists=False)
# BONDS
harmonic_head = espressomd.interactions.HarmonicBond(
k=harmonic_k, r_0=0.5 * (snake_head_sigma + snake_bead_sigma))
harmonic_bead = espressomd.interactions.HarmonicBond(
k=harmonic_k, r_0=snake_bead_sigma)
system.bonded_inter.add(harmonic_head)
system.bonded_inter.add(harmonic_bead)
# PARTICLES
# SNAKE
for i in range(snake_n):
if i == 0:
p_head = system.part.add(
pos=snake_startpos,
type=snake_head_type,
fix=[0, 0, 1],
mass=snake_head_mass,
temp=temperature_snake,
gamma=gamma_snake_head)
else:
system.part.add(
pos=snake_startpos
+ np.array([0, -1, 0])
* (0.5 * (snake_head_sigma + snake_bead_sigma)
+ (i - 1) * snake_bead_sigma),
bonds=(harmonic_bead if (i > 1) else harmonic_head, i - 1),
type=snake_bead_type,
fix=[0, 0, 1],
mass=snake_bead_mass,
temp=temperature_snake,
gamma=gamma_snake_bead)
# NB INTER
WCA_cut = 2.0**(1. / 6.)
system.non_bonded_inter[snake_head_type, snake_head_type].wca.set_params(
epsilon=1.0, sigma=snake_head_sigma)
sm = 0.5 * (snake_head_sigma + snake_bead_sigma)
system.non_bonded_inter[snake_bead_type, snake_head_type].wca.set_params(
epsilon=1.0, sigma=sm)
system.non_bonded_inter[snake_bead_type, snake_bead_type].wca.set_params(
epsilon=1.0, sigma=snake_bead_sigma)
sm = 0.5 * (snake_head_sigma + cylinder_sigma)
system.non_bonded_inter[snake_head_type, cylinder_type].wca.set_params(
epsilon=10.0, sigma=sm)
sm = 0.5 * (snake_bead_sigma + cylinder_sigma)
system.non_bonded_inter[snake_bead_type, cylinder_type].wca.set_params(
epsilon=10.0, sigma=sm)
sm = 0.5 * (bubble_sigma + snake_bead_sigma)
system.non_bonded_inter[snake_bead_type, bubble_type].wca.set_params(
epsilon=bubble_snake_eps, sigma=sm)
sm = 0.5 * (bubble_sigma + snake_head_sigma)
system.non_bonded_inter[snake_head_type, bubble_type].wca.set_params(
epsilon=1.0, sigma=sm)
sm = 0.5 * (bubble_sigma + cylinder_sigma)
system.non_bonded_inter[bubble_type, cylinder_type].lennard_jones.set_params(
epsilon=1000.0, sigma=sm, cutoff=2.5 * sm, shift="auto")
system.non_bonded_inter[bubble_type, bubble_type].lennard_jones.set_params(
epsilon=bubble_bubble_eps, sigma=bubble_sigma, cutoff=2.5 * bubble_sigma, shift="auto")
# CONSTRAINTS
system.constraints.add(shape=espressomd.shapes.Wall(
dist=0, normal=[1, 0, 0]), particle_type=cylinder_type, penetrable=True)
system.constraints.add(shape=espressomd.shapes.Wall(
dist=-box[0], normal=[-1, 0, 0]), particle_type=cylinder_type, penetrable=True)
system.constraints.add(shape=espressomd.shapes.Wall(
dist=0, normal=[0, 1, 0]), particle_type=cylinder_type, penetrable=True)
system.constraints.add(shape=espressomd.shapes.Wall(
dist=-box[1], normal=[0, -1, 0]), particle_type=cylinder_type, penetrable=True)
system.constraints.add(shape=espressomd.shapes.SimplePore(
center=0.5 * box, axis=[1, 0, 0], length=pore_length, radius=pore_radius,
smoothing_radius=5), particle_type=cylinder_type, penetrable=True)
# BUBBLES
n = 0
while n < bubbles_n:
# bpos = [pore_xr + np.random.random() * (pore_xr - pore_xl -
# snake_head_sigma*4) + snake_head_sigma * 2, np.random.random() * box[1],
# box[2]*0.5]
bpos = [np.random.random() * (pore_xl - snake_head_sigma * 4) +
snake_head_sigma * 2, np.random.random() * box[1], box[2] * 0.5]
system.part.add(
pos=bpos,
type=bubble_type,
fix=[0, 0, 1],
mass=bubble_mass,
temp=temperature_bubbles,
gamma=gamma_bubbles)
testid = len(system.part) - 1
n += 1
if system.analysis.dist_to(id=testid) < bubble_sigma * 0.5:
system.part[testid].remove()
n -= 1
p_bubbles = np.where(system.part[:].type == bubble_type)[0]
# TEMP CHANGE PARTICLES
bpos = [np.random.random() * (pore_xl - snake_head_sigma * 4) +
snake_head_sigma * 2, np.random.random() * box[1], box[2] * 0.5]
p_temp_inc = system.part.add(
pos=bpos,
type=temp_change_inc_type,
fix=[1, 1, 1])
bpos = [pore_xr
+ np.random.random() * (pore_xr - pore_xl - snake_head_sigma * 4)
+ snake_head_sigma * 2,
np.random.random() * box[1],
box[2] * 0.5]
p_temp_dec = system.part.add(
pos=bpos,
type=temp_change_dec_type,
fix=[1, 1, 1])
# MINIMIZE ENERGY
energy = system.analysis.energy()
#print("Before Minimization: E_total = {}".format(energy['total']))
system.minimize_energy.init(f_max=100, gamma=30.0,
max_steps=10000, max_displacement=0.01)
system.minimize_energy.minimize()
energy = system.analysis.energy()
#print("After Minimization: E_total = {}".format(energy['total']))
p_startpos = system.part[:].pos
# THERMOSTAT
system.thermostat.set_langevin(kT=temperature, gamma=gamma, seed=42)
# CONTROL CALLBACKS
F_act_k = np.zeros(2)
F_act_j = np.zeros(2)
def move_up_set():
global F_act_k
F_act_k[1] = 1.0
set_particle_force()
def move_down_set():
global F_act_k
F_act_k[1] = -1.0
set_particle_force()
def move_updown_reset():
global F_act_k
F_act_k[1] = 0
set_particle_force()
def move_left_set():
global F_act_k
F_act_k[0] = -1.0
set_particle_force()
def move_right_set():
global F_act_k
F_act_k[0] = 1.0
set_particle_force()
def move_leftright_reset():
global F_act_k
F_act_k[0] = 0
set_particle_force()
def set_particle_force():
global F_act_j, F_act_k
F_control_tot = np.append(np.clip(F_act_k + F_act_j, -1, 1), 0)
system.part[0].ext_force = move_force * F_control_tot
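# Note (added for clarity, not part of the original script): keyboard (F_act_k)
# and joystick (F_act_j) contributions are summed, clipped to the unit square,
# and applied as an external force of up to move_force per axis on the snake
# head (particle 0).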
def restart():
system.part[:].pos = p_startpos
system.galilei.kill_particle_motion()
system.galilei.kill_particle_forces()
expl_time = 0
exploding = False
def explode():
global exploding, expl_time
if not exploding:
exploding = True
expl_time = time.time()
for p in system.part[p_bubbles]:
dv = p.pos - p_head.pos
lv = np.linalg.norm(dv)
if lv < expl_range:
p.v = dv / lv / lv * expl_force
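# Note (added for clarity, not part of the original script): explode() kicks
# every bubble within expl_range radially away from the snake head with speed
# expl_force / distance, so nearby bubbles are pushed harder; the `exploding`
# flag is reset in the main loop roughly one second later.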
# KEYBOARD CONTROLS
visualizer.keyboardManager.register_button(
KeyboardButtonEvent('i', KeyboardFireEvent.Pressed, move_up_set))
visualizer.keyboardManager.register_button(
KeyboardButtonEvent('k', KeyboardFireEvent.Pressed, move_down_set))
visualizer.keyboardManager.register_button(
KeyboardButtonEvent('i', KeyboardFireEvent.Released, move_updown_reset))
visualizer.keyboardManager.register_button(
KeyboardButtonEvent('k', KeyboardFireEvent.Released, move_updown_reset))
visualizer.keyboardManager.register_button(
KeyboardButtonEvent('j', KeyboardFireEvent.Pressed, move_left_set))
visualizer.keyboardManager.register_button(
KeyboardButtonEvent('l', KeyboardFireEvent.Pressed, move_right_set))
visualizer.keyboardManager.register_button(
KeyboardButtonEvent('j', KeyboardFireEvent.Released, move_leftright_reset))
visualizer.keyboardManager.register_button(
KeyboardButtonEvent('l', KeyboardFireEvent.Released, move_leftright_reset))
visualizer.keyboardManager.register_button(
KeyboardButtonEvent('p', KeyboardFireEvent.Pressed, explode))
visualizer.keyboardManager.register_button(
KeyboardButtonEvent('b', KeyboardFireEvent.Pressed, restart))
# MAIN LOOP
def main():
global F_act_j, F_act_k, temp_l, temp_r, exploding, expl_time
def T_to_g(temp):
return 0.1 + 5.0 / (1.0 + 0.001 * temp)
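# Note (added for clarity, not part of the original script): T_to_g maps a
# chamber temperature to a Langevin friction coefficient, about 5.1 at T=0 and
# approaching 0.1 for very hot chambers (roughly 0.15 at T=1e5), so particles
# in a hot chamber are damped less.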
zoom_eq = 5.0
zoom_v = 0.0
zoom_a = 0.0
zoom = zoom_eq
zoom_dt = 0.01
ud_cnt = 0
tincF = 0
tdecF = 0
exploding = False
button_A_old = 0
button_Start_old = 0
while True:
# INTEGRATE
system.integrator.run(1)
if p_head.pos[0] > pore_xl and p_head.pos[0] < pore_xr:
z_eq = 10.0
v_f = 0.1
else:
z_eq = zoom_eq
v_f = 1.0
# CAMERA TRACKING
zoom_a = (z_eq - zoom) * 0.2 - zoom_v * 0.8 + v_f * \
0.005 * np.linalg.norm(system.part[0].v)
zoom_v += zoom_a * zoom_dt
zoom += zoom_v * zoom_dt + zoom_a * zoom_dt * zoom_dt
camPos = np.copy(system.part[0].pos) - box * 0.5
camPos[2] = box[2] * zoom
camTarget = system.part[0].pos - box * 0.5
t = camPos - camTarget
r = np.linalg.norm(t)
visualizer.camera.state_pos = camPos
visualizer.camera.state_target = -t / r
visualizer.camera.update_modelview()
# COUNT L/R
ud_cnt += 1
if ud_cnt > 100:
ud_cnt = 0
pl = system.part.select(
lambda p: p.pos[0] < pore_xl and p.type == bubble_type)
pr = system.part.select(
lambda p: p.pos[0] > pore_xr and p.type == bubble_type)
Nl = len(pl)
Nr = len(pr)
for p in pl:
p.temp = temp_l
p.gamma = T_to_g(temp_l)
for p in pr:
p.temp = temp_r
p.gamma = T_to_g(temp_r)
w = visualizer.specs['window_size']
visualizer.user_texts = [
[[20, w[1] - 20], 'LEFT: {} RIGHT: {}'.format(Nl, Nr)],
[[20, w[1] - 40], 'TEMPERATURE LEFT: {:.0f} TEMPERATURE RIGHT: {:.0f}'.format(temp_l, temp_r)]]
# [[w[0] * 0.5, w[1] - 60], 'GAMMA LEFT: {:0.4f} GAMMA RIGHT: {:0.4f}'.format( T_to_g(temp_l), T_to_g(temp_r))]]
# TEMP CHANGE COLLISION
repos_temp_inc = False
repos_temp_dec = False
if np.linalg.norm(
p_head.pos - p_temp_inc.pos) < temp_change_radius + snake_head_sigma * 0.5:
repos_temp_inc = True
if p_temp_inc.pos[0] > box[0] * 0.5:
temp_r += dtemp
if temp_r > temp_max:
temp_r = temp_max
else:
temp_l += dtemp
if temp_l > temp_max:
temp_l = temp_max
if np.linalg.norm(
p_head.pos - p_temp_dec.pos) < temp_change_radius + snake_head_sigma * 0.5:
repos_temp_dec = True
if p_temp_dec.pos[0] > box[0] * 0.5:
temp_r -= dtemp
if temp_r < 0:
temp_r = 0.0
for p in system.part[p_bubbles]:
if p.pos[0] > pore_xr:
p.v = [0, 0, 0]
else:
temp_l -= dtemp
if temp_l < 0:
temp_l = 0.0
for p in system.part[p_bubbles]:
if p.pos[0] < pore_xl:
p.v = [0, 0, 0]
# PLACE TEMP CHANGE PARTICLES
tincF += 1
tdecF += 1
if repos_temp_inc or tincF > 5000:
tincF = 0
if np.random.random() < 0.5:
p_temp_inc.pos = [np.random.random()
* (pore_xl - snake_head_sigma * 4)
+ snake_head_sigma * 2,
np.random.random() * box[1],
box[2] * 0.5]
else:
p_temp_inc.pos = [pore_xr
+ np.random.random()
* (pore_xr - pore_xl - snake_head_sigma * 4)
+ snake_head_sigma * 2,
np.random.random() * box[1],
box[2] * 0.5]
if repos_temp_dec or tdecF > 5000:
tdecF = 0
if np.random.random() < 0.5:
p_temp_dec.pos = [np.random.random()
* (pore_xl - snake_head_sigma * 4)
+ snake_head_sigma * 2,
np.random.random() * box[1],
box[2] * 0.5]
else:
p_temp_dec.pos = [pore_xr
+ np.random.random()
* (pore_xr - pore_xl - snake_head_sigma * 4)
+ snake_head_sigma * 2,
np.random.random() * box[1],
box[2] * 0.5]
# REENABLE EXPLOSION
if exploding and time.time() - expl_time > 1:
exploding = False
# VISUALIZER
visualizer.update()
if has_pygame:
if joystick_control:
pygame.event.get()
axis_l = np.array(
[joystick.get_axis(0), -joystick.get_axis(1)])
axis_r = np.array(
[joystick.get_axis(3), -joystick.get_axis(4)])
button_A = joystick.get_button(0)
button_Start = joystick.get_button(7)
if not button_A_old and button_A:
explode()
if not button_Start_old and button_Start:
restart()
button_A_old = button_A
                button_Start_old = button_Start
hat = joystick.get_hat(0)
F_act_j = np.clip(np.array(hat) + axis_l + axis_r, -1, 1)
set_particle_force()
t = Thread(target=main)
t.daemon = True
t.start()
visualizer.start()
|
psci2195/espresso-ffans
|
samples/chamber_game.py
|
Python
|
gpl-3.0
| 17,946
|
[
"ESPResSo"
] |
f117e5eb7a9ec85e5373ff11fdba989b6257cd86c04437b3acaa48e067f5f39f
|
# ******************************************************************************
# pysimm.apps.random_walk module
# ******************************************************************************
#
# pseudo-random walk algorithm written using pysimm tools
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2016 Michael E. Fortunato, Coray M. Colina
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from time import strftime
from itertools import permutations, izip
import numpy as np
from pysimm import system, lmps, forcefield, calc
from pysimm import error_print
def find_last_backbone_vector(s, m):
"""pysimm.apps.random_walk.find_last_backbone_vector
    Finds the vector between the backbone (head and tail linker) atoms in the terminal monomer. Requires the current system s and the reference monomer m.
Args:
s: :class:`~pysimm.system.System` object
m: :class:`~pysimm.system.System` object
Returns:
list of vector components
"""
head_pos = [0, 0, 0]
tail_pos = [0, 0, 0]
for p in s.particles[-1*m.particles.count:]:
if p.linker == 'head':
head_pos = [p.x, p.y, p.z]
elif p.linker == 'tail':
tail_pos = [p.x, p.y, p.z]
return [head_pos[0] - tail_pos[0], head_pos[1] - tail_pos[1], head_pos[2] - tail_pos[2]]
def copolymer(m, nmon, s_=None, **kwargs):
"""pysimm.apps.random_walk.copolymer
    Builds a copolymer via the random walk methodology, following a repeat-unit pattern
Args:
m: list of reference monomer :class:`~pysimm.system.System`s
nmon: total number of monomers to add to chain
s_: :class:`~pysimm.system.System` in which to build polymer chain (None)
settings: dictionary of simulation settings
density: density at which to build polymer (0.3)
forcefield: :class:`~pysimm.forcefield.Forcefield` object to acquire new force field parameters
capped: True/False if monomers are capped
unwrap: True to unwrap final system
traj: True to build xyz trajectory of polymer growth (True)
        pattern: list defining the pattern of monomer repeat units; should match length of m ([1 for _ in range(len(m))])
limit: during MD, limit atomic displacement by this max value (LAMMPS ONLY)
sim: :class:`~pysimm.lmps.Simulation` object for relaxation between polymer growth
Returns:
new copolymer :class:`~pysimm.system.System`
"""
m = [x.copy() for x in m]
settings = kwargs.get('settings', {})
density = kwargs.get('density', 0.3)
f = kwargs.get('forcefield')
capped = kwargs.get('capped')
unwrap = kwargs.get('unwrap')
traj = kwargs.get('traj', True)
pattern = kwargs.get('pattern', [1 for _ in range(len(m))])
limit = kwargs.get('limit', 0.1)
sim = kwargs.get('sim')
for m_ in m:
m_.add_particle_bonding()
for p in m_.particles:
if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):
p.linker = 'head'
elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):
p.linker = 'tail'
m_.remove_linker_types()
if s_ is None:
s = system.replicate(m[0], 1, density=density/nmon)
else:
s = system.replicate(m[0], 1, s_=s_, density=density/nmon)
print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))
for p in s.particles:
if p.linker == 'head':
last_head = p
elif p.linker == 'tail':
last_tail = p
for m_ in m:
if capped:
m_.particles.remove(1)
m_.remove_spare_bonding()
m_.add_particle_bonding()
s.add_particle_bonding()
if traj:
s.write_xyz('random_walk.xyz')
temp_nmon = 1
while True:
m_ = m.pop(0)
m.append(m_)
p_ = pattern.pop(0)
pattern.append(p_)
if temp_nmon == 1 and p_ == 1:
m_ = m.pop(0)
m.append(m_)
p_ = pattern.pop(0)
pattern.append(p_)
elif temp_nmon == 1:
p_ -= 1
for insert in range(p_):
head = None
tail = None
backbone_vector = np.array([last_head.x - last_tail.x,
last_head.y - last_tail.y,
last_head.z - last_tail.z])
ref_head = None
ref_tail = None
for p in m_.particles:
if p.linker == 'head':
ref_head = p
elif p.linker == 'tail':
ref_tail = p
if ref_head and ref_tail:
ref_backbone_vector = np.array([ref_head.x - ref_tail.x,
ref_head.y - ref_tail.y,
ref_head.z - ref_tail.z])
rot_matrix = calc.find_rotation(ref_backbone_vector, backbone_vector)
m_.rotate(around=ref_tail, rot_matrix=rot_matrix)
translation_vector = [last_tail.x - ref_tail.x,
last_tail.y - ref_tail.y,
last_tail.z - ref_tail.z]
for p in m_.particles:
p.x = p.x + translation_vector[0] + 3*backbone_vector[0]
p.y = p.y + translation_vector[1] + 3*backbone_vector[1]
p.z = p.z + translation_vector[2] + 3*backbone_vector[2]
else:
print('reference molecule has no head or tail')
n = m_.copy()
if capped:
s.particles.remove(s.particles.count)
s.remove_spare_bonding()
s.add_particle_bonding()
s.add(n, change_dim=False)
s.add_particle_bonding()
head = last_head
for p in s.particles[-1*n.particles.count:]:
if p.linker == 'tail':
tail = p
s.make_new_bonds(head, tail, f)
temp_nmon += 1
print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), temp_nmon, nmon))
if unwrap:
s.unwrap()
if sim is None:
sim = lmps.Simulation(s, name='relax_%03d' % (temp_nmon), log='relax.log', **settings)
sim.add_md(ensemble='nve', limit=limit, **settings)
sim.add_min(**settings)
if isinstance(sim, lmps.Simulation):
sim.system = s
sim.name = 'relax_%03d' % (temp_nmon)
sim.run(np=settings.get('np'))
if unwrap:
s.unwrap()
if unwrap:
s.wrap()
for p in s.particles[-1*n.particles.count:]:
if p.linker == 'head':
last_head = p
elif p.linker == 'tail':
last_tail = p
if temp_nmon >= nmon:
break
if unwrap:
if not s.unwrap():
error_print('something went wrong')
return s
if traj:
s.write_xyz('random_walk.xyz', append=True)
if unwrap:
s.wrap()
for p in s.particles:
if p not in s.molecules[p.molecule.tag].particles:
s.molecules[p.molecule.tag].particles.add(p)
s.write_lammps('polymer.lmps')
s.unwrap()
s.write_xyz('polymer.xyz')
return s
def random_walk(m, nmon, s_=None, **kwargs):
"""pysimm.apps.random_walk.random_walk
Builds homopolymer using random walk methodology
Args:
m: reference monomer :class:`~pysimm.system.System`
nmon: total number of monomers to add to chain
s_: :class:`~pysimm.system.System` in which to build polymer chain (None)
        extra_bonds: EXPERIMENTAL, True if making ladder backbone polymer
settings: dictionary of simulation settings
density: density at which to build polymer (0.3)
forcefield: :class:`~pysimm.forcefield.Forcefield` object to acquire new force field parameters
capped: True/False if monomers are capped
unwrap: True to unwrap final system
traj: True to build xyz trajectory of polymer growth (True)
limit: during MD, limit atomic displacement by this max value (LAMMPS ONLY)
sim: :class:`~pysimm.lmps.Simulation` object for relaxation between polymer growth
Returns:
new polymer :class:`~pysimm.system.System`
"""
m = m.copy()
extra_bonds = kwargs.get('extra_bonds', False)
settings = kwargs.get('settings', {})
density = kwargs.get('density', 0.3)
f = kwargs.get('forcefield')
capped = kwargs.get('capped')
unwrap = kwargs.get('unwrap')
traj = kwargs.get('traj', True)
limit = kwargs.get('limit', 0.1)
sim = kwargs.get('sim')
m.add_particle_bonding()
for p in m.particles:
if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):
p.linker = 'head'
elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):
p.linker = 'tail'
m.remove_linker_types()
if s_ is None:
s = system.replicate(m, 1, density=density/nmon)
else:
s = system.replicate(m, 1, s_=s_, density=None)
print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))
if traj:
s.write_xyz('random_walk.xyz')
if capped:
m.particles.remove(1)
m.remove_spare_bonding()
m.add_particle_bonding()
for insertion in range(nmon - 1):
head = None
tail = None
backbone_vector = np.array(find_last_backbone_vector(s, m))
for p, p_ in izip(s.particles[-1*m.particles.count:], m.particles):
p_.x = p.x + 3*backbone_vector[0]
p_.y = p.y + 3*backbone_vector[1]
p_.z = p.z + 3*backbone_vector[2]
n = m.copy()
if capped:
s.particles.remove(s.particles.count)
s.remove_spare_bonding()
s.add_particle_bonding()
if extra_bonds:
heads = []
for p in s.particles[-1*n.particles.count:]:
if p.linker == 'head':
heads.append(p)
else:
for p in s.particles[-1*n.particles.count:]:
if p.linker == 'head':
head = p
s.add(n, change_dim=False)
s.add_particle_bonding()
if extra_bonds:
tails = []
for p in s.particles[-1*n.particles.count:]:
if p.linker == 'tail':
tails.append(p)
else:
for p in s.particles[-1*n.particles.count:]:
if p.linker == 'tail':
tail = p
for p in s.particles:
if not p.bonded_to:
print(p.tag)
if head and tail:
s.make_new_bonds(head, tail, f)
print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))
elif extra_bonds and len(heads) == len(tails):
for h, t in izip(heads, tails):
s.make_new_bonds(h, t, f)
print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))
else:
print('cannot find head and tail')
if sim is None:
sim = lmps.Simulation(s, name='relax_%03d' % (insertion+2), log='relax.log', **settings)
sim.add_md(ensemble='nve', limit=limit, **settings)
sim.add_min(**settings)
if isinstance(sim, lmps.Simulation):
sim.system = s
sim.name = 'relax_%03d' % (insertion+2)
sim.run(np=settings.get('np'))
s.unwrap()
if traj:
s.write_xyz('random_walk.xyz', append=True)
if unwrap:
s.wrap()
for p in s.particles:
if p not in s.molecules[p.molecule.tag].particles:
s.molecules[p.molecule.tag].particles.add(p)
s.write_lammps('polymer.lmps')
s.unwrap()
s.write_xyz('polymer.xyz')
return s
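# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): given a
# monomer system `mon` whose head/tail linker atoms carry '@H'/'@T' tags in
# their type names, and a pysimm force field object `ff`, chains could be grown
# roughly as follows (all names here are placeholders):
#
#     polymer = random_walk(mon, nmon=10, forcefield=ff, density=0.3)
#     copoly = copolymer([mon_a, mon_b], nmon=10, pattern=[1, 1], forcefield=ff)
#
# How `mon`, `mon_a`, `mon_b` and `ff` are constructed depends on the input
# structures and force field in use.
# ------------------------------------------------------------------------------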
|
plin1112/pysimm
|
pysimm/apps/random_walk.py
|
Python
|
mit
| 13,307
|
[
"LAMMPS"
] |
2802887d3f7a64df06c08165590853303b366879a3978b47fda3660932365698
|
#!/usr/bin/env python2
# Copyright (C) 2020
# Max Planck Institute for Polymer Research & JGU Mainz
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import espressopp
from espressopp.tools import readxyz
import time
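# Run a short Lennard-Jones MD of a pre-generated fluid, optionally through the
# vectorization layer (AOS or SOA mode), and return the final particle
# configurations so vectorized and non-vectorized runs can be compared.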
def generate_md(use_vec=True, vec_mode=""):
print '{}USING VECTORIZATION'.format('NOT ' if not use_vec else '')
if use_vec:
print 'MODE={}'.format(vec_mode)
nsteps = 1
isteps = 10
#
# NOTE: For performance comparison increase isteps to 1000
#
rc = 2.5
skin = 0.3
timestep = 0.005
epsilon = 1.0
sigma = 1.0
# ensure deterministic trajectories
temperature = None
xyz_file = "lennard_jones_fluid_10000_2048.xyz"
pid, type, x, y, z, vx, vy, vz, Lx, Ly, Lz = readxyz(xyz_file)
box = (Lx, Ly, Lz)
num_particles = len(pid)
system, integrator = espressopp.standard_system.Default(box=box, rc=rc, skin=skin, dt=timestep, temperature=temperature)
if use_vec:
vec = espressopp.vectorization.Vectorization(system, integrator, mode=vec_mode)
props = ['id', 'type', 'mass', 'pos', 'v']
new_particles = []
for i in range(num_particles):
part = [i + 1, 0, 1.0, espressopp.Real3D(x[i], y[i], z[i]), espressopp.Real3D(vx[i], vy[i], vz[i])]
new_particles.append(part)
system.storage.addParticles(new_particles, *props)
system.storage.decompose()
# Lennard-Jones with Verlet list
if use_vec:
vl = espressopp.vectorization.VerletList(system, vec, cutoff = rc)
interLJ = espressopp.vectorization.interaction.VerletListLennardJones(vl)
potLJ = espressopp.vectorization.interaction.LennardJones(epsilon=1.0, sigma=1.0, cutoff=rc, shift=0)
else:
vl = espressopp.VerletList(system, cutoff = rc)
interLJ = espressopp.interaction.VerletListLennardJones(vl)
potLJ = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, cutoff=rc, shift=0)
interLJ.setPotential(type1=0, type2=0, potential=potLJ)
system.addInteraction(interLJ)
print ''
print 'number of particles = ', num_particles
print "storage = ", system.storage.__class__.__name__
print "integrator = ", integrator.__class__.__name__
print "verletlist = ", ".".join([vl.__class__.__module__,vl.__class__.__name__])
print "interaction = ", ".".join([interLJ.__class__.__module__,interLJ.__class__.__name__])
print ''
if hasattr(vl,'resetTimers'):
vl.resetTimers()
if use_vec: vl.rebuildPairs()
espressopp.tools.analyse.info(system, integrator)
start_time = time.clock()
for k in range(nsteps):
integrator.run(isteps)
if use_vec: vl.rebuildPairs()
espressopp.tools.analyse.info(system, integrator)
end_time = time.clock()
espressopp.tools.analyse.final_info(system, integrator, vl, start_time, end_time)
# retrieve particle positions after run
configurations = espressopp.analysis.Configurations(system, pos=True, vel=True, force=True)
configurations.gather()
return [configurations[0][i] for i in range(num_particles)]
class TestVectorization(unittest.TestCase):
def test1(self):
''' Ensure that positions after integration are the same for both vec and non-vec versions '''
print '-'*70
pos0 = generate_md(True,'AOS')
print '-'*70
pos1 = generate_md(True,'SOA')
print '-'*70
pos2 = generate_md(False)
print '-'*70
self.assertEqual(len(pos0), len(pos2))
diff = [(pos0[i]-pos2[i]).sqr() for i in range(len(pos2))]
for d in diff:
self.assertAlmostEqual(d,0.0,8)
self.assertEqual(len(pos1), len(pos2))
diff = [(pos1[i]-pos2[i]).sqr() for i in range(len(pos1))]
for d in diff:
self.assertAlmostEqual(d,0.0,8)
if __name__ == "__main__":
unittest.main()
|
govarguz/espressopp
|
testsuite/vectorization/test_vectorization.py
|
Python
|
gpl-3.0
| 4,384
|
[
"ESPResSo"
] |
c22c12b216a18c65a77e653aedf5c51ae4d0c111b9020c9bbe8098602c624622
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# showvgridprivatefile - View VGrid private files for owners and members
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Show the requested file located in a given vgrids private_base dir if the
client is an owner or a member of the vgrid. Members are allowed to read private
files but not write them, therefore they don't have a private_base link where
they can access them like owners do.
"""
import os
import shared.returnvalues as returnvalues
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.init import initialize_main_variables
from shared.validstring import valid_user_path
from shared.vgrid import vgrid_is_owner_or_member
def signature():
"""Signature of the main function"""
defaults = {'vgrid_name': REJECT_UNSET, 'path': REJECT_UNSET}
return ['file_output', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False)
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
vgrid_name = accepted['vgrid_name'][-1]
path = accepted['path'][-1]
if not vgrid_is_owner_or_member(vgrid_name, client_id,
configuration):
output_objects.append({'object_type': 'error_text', 'text':
'''You must be an owner or member of %s %s to
access the private files.''' % (vgrid_name, configuration.site_vgrid_label)})
return (output_objects, returnvalues.CLIENT_ERROR)
# Please note that base_dir must end in slash to avoid access to other
# user dirs when own name is a prefix of another user name
base_dir = os.path.abspath(os.path.join(configuration.vgrid_private_base,
vgrid_name)) + os.sep
# Strip leading slashes to avoid join() throwing away prefix
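    # (os.path.join('/base/', '/etc/passwd') returns '/etc/passwd', so an
    # absolute path would otherwise escape base_dir entirely)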
rel_path = path.lstrip(os.sep)
real_path = os.path.abspath(os.path.join(base_dir, rel_path))
if not valid_user_path(real_path, base_dir, True):
output_objects.append({'object_type': 'error_text', 'text':
'''You are not allowed to use paths outside %s
private files dir.''' % configuration.site_vgrid_label})
return (output_objects, returnvalues.CLIENT_ERROR)
try:
private_fd = open(real_path, 'rb')
entry = {'object_type': 'binary',
'data': private_fd.read()}
# Cut away all the usual web page formatting to show only contents
output_objects = [{'object_type': 'start', 'headers': []}, entry,
{'object_type': 'script_status'},
{'object_type': 'end'}]
private_fd.close()
except Exception, exc:
output_objects.append({'object_type': 'error_text', 'text'
: 'Error reading %s private file (%s)'
% (configuration.site_vgrid_label, exc)})
return (output_objects, returnvalues.SYSTEM_ERROR)
return (output_objects, returnvalues.OK)
|
heromod/migrid
|
mig/shared/functionality/showvgridprivatefile.py
|
Python
|
gpl-2.0
| 4,210
|
[
"Brian"
] |
9c59246136423bae4ebf17c009cfaf4ddb821bcd1b85dcbc5f9d9923cb0631f2
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Implementation of classes related to till operations. """
import datetime
import gtk
from kiwi.currency import currency
from kiwi.ui.objectlist import Column, ColoredColumn
from stoqlib.api import api
from stoqlib.domain.till import TillEntry
from stoqlib.gui.search.searchdialog import SearchDialog
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.editors.tilleditor import (CashAdvanceEditor, CashInEditor,
CashOutEditor)
from stoqlib.gui.stockicons import (STOQ_MONEY, STOQ_MONEY_ADD,
STOQ_MONEY_REMOVE)
from stoqlib.gui.search.searchcolumns import IdentifierColumn
from stoqlib.gui.search.searchfilters import DateSearchFilter
from stoqlib.gui.search.searchoptions import Today
from stoqlib.gui.utils.printing import print_report
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.lib.defaults import payment_value_colorize
from stoqlib.reporting.till import TillHistoryReport
_ = stoqlib_gettext
class TillHistoryDialog(SearchDialog):
size = (780, -1)
search_spec = TillEntry
selection_mode = gtk.SELECTION_MULTIPLE
searchbar_labels = _('Till Entries matching:')
title = _('Till history')
#
# SearchDialog
#
def get_columns(self, *args):
return [IdentifierColumn('identifier', title=_('Entry #'), sorted=True),
Column('date', _('Date'), data_type=datetime.date),
Column('time', _('Time'), data_type=datetime.time),
Column('description', _('Description'), data_type=str,
expand=True),
ColoredColumn('value', _('Value'), data_type=currency,
color='red', data_func=payment_value_colorize,
width=140)]
def create_filters(self):
self.set_text_field_columns(['description'])
self.date_filter = DateSearchFilter(_('Date:'))
self.date_filter.select(Today)
self.add_filter(self.date_filter, columns=['date'])
# add summary label
value_format = '<b>%s</b>'
total_label = '<b>%s</b>' % api.escape(_(u'Total:'))
self.search.set_summary_label('value', total_label, value_format)
def setup_widgets(self):
self.results.set_visible_rows(10)
self.results.connect('has-rows', self._has_rows)
self._add_editor_button(_('Cash _Add...'), CashAdvanceEditor,
STOQ_MONEY)
self._add_editor_button(_('Cash _In...'), CashInEditor,
STOQ_MONEY_ADD)
self._add_editor_button(_('Cash _Out...'), CashOutEditor,
STOQ_MONEY_REMOVE)
self.print_button = gtk.Button(None, gtk.STOCK_PRINT, True)
self.print_button.set_property("use-stock", True)
self.print_button.connect('clicked', self._print_button_clicked)
self.action_area.set_layout(gtk.BUTTONBOX_START)
self.action_area.pack_end(self.print_button, False, False, 6)
self.print_button.show()
self.print_button.set_sensitive(False)
#
# Private API
#
def _add_editor_button(self, name, editor_class, stock):
button = self.add_button(name, stock=stock)
button.connect('clicked', self._run_editor, editor_class)
button.show()
def _print_button_clicked(self, button):
print_report(TillHistoryReport, self.results, list(self.results),
filters=self.search.get_search_filters())
def _run_editor(self, button, editor_class):
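        # Run the editor in a fresh store; refresh the result list and select
        # the newest entry only if the transaction was actually committed.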
with api.new_store() as store:
run_dialog(editor_class, self, store)
if store.committed:
self.search.refresh()
self.results.unselect_all()
if len(self.results):
self.results.select(self.results[-1])
def _has_rows(self, results, obj):
self.print_button.set_sensitive(obj)
|
andrebellafronte/stoq
|
stoqlib/gui/dialogs/tillhistory.py
|
Python
|
gpl-2.0
| 4,857
|
[
"VisIt"
] |
1fa1d043702f257825a27cc9522097351a67fb86b1abc96ba9722fdab43e6102
|
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
import sys
sys.path.append("../")
from functions import *
from pylab import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import matplotlib.cm as cm
import os
###############################################################################################################
# TO LOAD
###############################################################################################################
store = pd.HDFStore("../../figures/figures_articles_v2/figure6/determinant_corr.h5", 'r')
det_all = store['det_all']
shufflings = store['shufflings']
shuffl_shank = store['shuffling_shank']
store.close()
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
# WHICH NEURONS
space = pd.read_hdf("../../figures/figures_articles_v2/figure1/space.hdf5")
burst = pd.HDFStore("/mnt/DataGuillaume/MergedData/BURSTINESS.h5")['w']
burst = burst.loc[space.index]
hd_index = space.index.values[space['hd'] == 1]
# neurontoplot = [np.intersect1d(hd_index, space.index.values[space['cluster'] == 1])[0],
# burst.loc[space.index.values[space['cluster'] == 0]].sort_values('sws').index[3],
# burst.sort_values('sws').index.values[-20]]
firing_rate = pd.read_hdf("/mnt/DataGuillaume/MergedData/FIRING_RATE_ALL.h5")
fr_index = firing_rate.index.values[((firing_rate >= 1.0).sum(1) == 3).values]
# SWR MODULATION
swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True)
nbins = 400
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
swr = pd.DataFrame( columns = swr_ses,
index = times,
data = gaussFilt(swr_mod, (5,)).transpose())
swr = swr.loc[-500:500]
# AUTOCORR FAST
store_autocorr = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_ALL.h5")
autocorr_wak = store_autocorr['wake'].loc[0.5:]
autocorr_rem = store_autocorr['rem'].loc[0.5:]
autocorr_sws = store_autocorr['sws'].loc[0.5:]
autocorr_wak = autocorr_wak.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_rem = autocorr_rem.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_sws = autocorr_sws.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_wak = autocorr_wak[2:20]
autocorr_rem = autocorr_rem[2:20]
autocorr_sws = autocorr_sws[2:20]
neurons = np.intersect1d(swr.dropna(1).columns.values, autocorr_sws.dropna(1).columns.values)
neurons = np.intersect1d(neurons, fr_index)
X = np.copy(swr[neurons].values.T)
Y = np.copy(np.vstack((autocorr_wak[neurons].values,autocorr_rem[neurons].values, autocorr_sws[neurons].values))).T
Y = Y - Y.mean(1)[:,np.newaxis]
Y = Y / Y.std(1)[:,np.newaxis]
pca_swr = PCA(n_components=10).fit(X)
pca_aut = PCA(n_components=10).fit(Y)
pc_swr = pca_swr.transform(X)
pc_aut = pca_aut.transform(Y)
All = np.hstack((pc_swr, pc_aut))
corr = np.corrcoef(All.T)
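# The coupling between the SWR and autocorrelogram PCA components is summarized
# below as a "total correlation" rho^2 = 1 - det(corr), and compared against the
# same quantity computed from the shuffled matrix corrsh.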
#shuffle
Xs = np.copy(X)
Ys = np.copy(Y)
np.random.shuffle(Xs)
np.random.shuffle(Ys)
pc_swr_sh = PCA(n_components=10).fit_transform(Xs)
pc_aut_sh = PCA(n_components=10).fit_transform(Ys)
Alls = np.hstack((pc_swr_sh, pc_aut_sh))
corrsh = np.corrcoef(Alls.T)
###############################################################################################################
# PLOT
###############################################################################################################
def figsize(scale):
fig_width_pt = 483.69687 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0/72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt*inches_per_pt*scale # width in inches
fig_height = fig_width*golden_mean*1.5 # height in inches
fig_size = [fig_width,fig_height]
return fig_size
def simpleaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
def noaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.set_xticks([])
ax.set_yticks([])
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
# mpl.use("pdf")
pdf_with_latex = { # setup matplotlib to use latex for output
"pgf.texsystem": "pdflatex", # change this if using xetex or lautex
# "text.usetex": True, # use LaTeX to write all text
# "font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 8, # LaTeX default is 10pt font.
"font.size": 7,
"legend.fontsize": 7, # Make the legend/label fonts a little smaller
"xtick.labelsize": 7,
"ytick.labelsize": 7,
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
],
"lines.markeredgewidth" : 0.2,
"axes.linewidth" : 0.8,
"ytick.major.size" : 1.5,
"xtick.major.size" : 1.5
}
mpl.rcParams.update(pdf_with_latex)
import matplotlib.gridspec as gridspec
from matplotlib.pyplot import *
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.cm as cmx
import matplotlib.colors as colors
# colors = ['#444b6e', '#708b75', '#9ab87a']
fig = figure(figsize = figsize(1.0))
gs = gridspec.GridSpec(3,3, wspace = 0.3, hspace = 0.3, width_ratios = [1,1,1], height_ratios = [1.8,1.8,1])
#########################################################################
# A. EXEMPLES HD
#########################################################################
gsA = gridspec.GridSpecFromSubplotSpec(2,1, subplot_spec=gs[0,0], hspace = 0.6, wspace = 0.4) #, width_ratios=[0.6,0.6,0.6], hspace = 0.2, wspace = 0.2)#, height_ratios = [1,1,0.2,1])
gs1 = gridspec.GridSpecFromSubplotSpec(1,2, subplot_spec=gsA[0,0], width_ratios = [0.7, 0.3])
ex_hd = ['Mouse17-130129_14', 'Mouse17-130129_18']
titles = ['Wake', 'REM', 'NREM']
# SWR HD
subplot(gs1[0,0])
simpleaxis(gca())
plot(swr[ex_hd[0]], '-', color = 'red', linewidth = 1)
plot(swr[ex_hd[1]], '--', color = 'red', linewidth = 1)
xlabel("Time from SWRs (ms)", labelpad = -0.0)
ylabel("Modulation")
gca().text(-0.3, 1.02, "a", transform = gca().transAxes, fontsize = 10, fontweight='bold')
# TUNING CURVES HD
tcurves = cPickle.load(open('../../figures/figures_articles_v4/figure1/good_100ms_pickle/Mouse17-130129.pickle', 'rb'))['swr'][0]['tcurves']
subplot(gs1[0,1], projection = 'polar')
for n, l in zip(ex_hd, ['-', '--']):
tmp = tcurves[int(n.split("_")[1])]
plot(tmp/tmp.max(), l, color = 'red', linewidth = 1)
gca().get_xaxis().tick_bottom()
gca().get_yaxis().tick_left()
xticks(np.arange(0, 2*np.pi, np.pi/4), ['0', '', '$\pi/2$', '', '$\pi$', '', '$3\pi/2$',''])
yticks([])
grid(linestyle = '--')
gca().tick_params(axis='x', pad = -3)
# title("Wake", pad = 7)
gs2 = gridspec.GridSpecFromSubplotSpec(1, 3, subplot_spec=gsA[1,0], wspace = 0.5)
# AUTOCORR
for i, ep in zip(range(3),['wake', 'rem', 'sws']):
subplot(gs2[0,i])
simpleaxis(gca())
tmp = store_autocorr[ep][ex_hd]
tmp.loc[0] = 0.0
tmp1 = tmp.loc[:0].rolling(window=20,win_type='gaussian',center=True,min_periods=1).mean(std=3.0)
tmp2 = tmp.loc[0:].rolling(window=20,win_type='gaussian',center=True,min_periods=1).mean(std=3.0)
tmp = pd.concat([tmp1.loc[:-0.5],tmp2])
tmp.loc[0] = 0.0
plot(tmp.loc[-50:50,ex_hd[0]], '-', color = 'red', linewidth = 1)
plot(tmp.loc[-50:50,ex_hd[1]], '--', color = 'red', linewidth = 1)
title(titles[i], fontsize = 8, pad = 3)
yticks([0, 5])
if i == 0:
ylabel("Autocorr.")
if i == 1:
xlabel("Time (ms)", labelpad = -0.0)
#########################################################################
# B. Examples shank
#########################################################################
# see main_search_examples_fig3.py
# neurons_to_plot = ['Mouse17-130207_39', 'Mouse17-130207_43', 'Mouse17-130207_37']
neurons_to_plot = ['Mouse17-130207_42', 'Mouse17-130207_37']
neuron_seed = 'Mouse17-130207_43'
titles = ['Wake', 'REM', 'NREM']
cmap = get_cmap('tab10')
color1 = cmap(0)
color2 = cmap(1)
color3 = cmap(2)
colors = [color1, color3]
colors = ['#4D85BD', '#7CAA2D']
color2 = 'rosybrown'
color_ex = [colors[0], color2, colors[1]]
lbs = ['b', 'c']
new_path = data_directory+neuron_seed.split('-')[0]+'/'+neuron_seed.split("_")[0]
meanWaveF = scipy.io.loadmat(new_path+'/Analysis/SpikeWaveF.mat')['meanWaveF'][0]
lw = 1.25
idx = [0,2]
for i, n in enumerate(neurons_to_plot):
gsB = gridspec.GridSpecFromSubplotSpec(2,1, subplot_spec=gs[0,i+1], hspace = 0.6, wspace = 0.4) #, width_ratios=[0.6,0.6,0.6], hspace = 0.2, wspace = 0.2)#, height_ratios = [1,1,0.2,1])
pairs = [neuron_seed, n]
# CORRELATION SWR
gs2 = gridspec.GridSpecFromSubplotSpec(1,2, subplot_spec=gsB[0,0], width_ratios = [0.7, 0.3])
subplot(gs2[0,0])
simpleaxis(gca())
plot(swr[neuron_seed], color = color2, linewidth = lw)
plot(swr[n], color =colors[i], linewidth = lw)
xlabel("Time from SWRs (ms)", labelpad = -0.01)
gca().text(-0.3, 1.0, lbs[i], transform = gca().transAxes, fontsize = 10, fontweight='bold')
# WAVEFORMS
gswave = gridspec.GridSpecFromSubplotSpec(1,2,subplot_spec = gs2[0,1])#, wspace = 0.3, hspace = 0.6)
subplot(gswave[:,0])
noaxis(gca())
for c in range(8):
plot(meanWaveF[int(neuron_seed.split('_')[1])][c]+c*200, color = color2, linewidth = lw)
# title("Mean waveforms (a.u.)", fontsize = 8)
subplot(gswave[:,1])
noaxis(gca())
for c in range(8):
plot(meanWaveF[int(n.split('_')[1])][c]+c*200, color = colors[i], linewidth = lw)
if i == 0:
xlabel("Waveforms", fontsize = 7)
# CORRELATION AUTO
gs3 = gridspec.GridSpecFromSubplotSpec(1, 3, subplot_spec=gsB[1,0], wspace = 0.5)
for j, ep in enumerate(['wake', 'rem', 'sws']):
subplot(gs3[0,j])
simpleaxis(gca())
title(titles[j], fontsize = 8, pad = 3)
tmp = store_autocorr[ep][pairs]
tmp.loc[0] = 0.0
tmp1 = tmp.loc[:0].rolling(window=20,win_type='gaussian',center=True,min_periods=1).mean(std=3.0)
tmp2 = tmp.loc[0:].rolling(window=20,win_type='gaussian',center=True,min_periods=1).mean(std=3.0)
tmp = pd.concat([tmp1.loc[:-0.5],tmp2])
tmp.loc[0] = 0.0
plot(tmp.loc[-50:50,neuron_seed], color = color2, linewidth = lw)
plot(tmp.loc[-50:50,n], color = colors[i], linewidth = lw)
if j == 1:
xlabel("Time (ms)", labelpad = -0.0)
# if i == 1 and j == 0:
# gca().text(-0.5, 1.15, "c", transform = gca().transAxes, fontsize = 10, fontweight='bold')
########################################################################
# D. MAPS FAR-AWAY EXEMPLES
########################################################################
gsD = gridspec.GridSpecFromSubplotSpec(1,3, subplot_spec=gs[1,0:2], hspace = 0.6, wspace = 0.35, width_ratios = [0.7, 0.7, 0.05]) #, width_ratios=[0.6,0.6,0.6], hspace = 0.2, wspace = 0.2)#, height_ratios = [1,1,0.2,1])
carte_adrien = imread('/home/guillaume/Dropbox (Peyrache Lab)/Peyrache Lab Team Folder/Projects/HPC-Thal/Figures/ATAnatomy_ALL-01.png')
bound_adrien = (-398/1254, 3319/1254, -(239/1254 - 20/1044), 3278/1254)
# specific to mouse 17
subspace = pd.read_hdf("../../figures/figures_articles_v2/figure1/subspace_Mouse17.hdf5")
data = cPickle.load(open("../../figures/figures_articles_v2/figure1/rotated_images_Mouse17.pickle", 'rb'))
rotated_images = data['rotated_images']
new_xy_shank = data['new_xy_shank']
bound = data['bound']
data = cPickle.load(open("../../data/maps/Mouse17.pickle", 'rb'))
x = data['x']
y = data['y']*-1.0+np.max(data['y'])
headdir = data['headdir']
xy_pos = new_xy_shank.reshape(len(y), len(x), 2)
def show_labels(ax):
ax.text(0.68, 1.09, "AM", fontsize = 6.5, bbox=dict(facecolor='#C9C9C9', edgecolor = 'none', boxstyle='square,pad=-0.1'))
ax.text(1.26, 1.26, "VA", fontsize = 6.5, bbox=dict(facecolor='#C9C9C9', edgecolor = 'none', boxstyle='square,pad=-0.1'))
ax.text(0.92, 2.05, "AVd", fontsize = 6.5, bbox=dict(facecolor='#C9C9C9', edgecolor = 'none', boxstyle='square,pad=-0.1'), rotation = 50)
ax.text(1.14, 1.72, "AVv", fontsize = 6.5, bbox=dict(facecolor='#C9C9C9', edgecolor = 'none', boxstyle='square,pad=-0.1'))
ax.text(1.28, 2.25, "LD", fontsize = 6.5, bbox=dict(facecolor='#C9C9C9', edgecolor = 'none', boxstyle='square,pad=-0.1'))
ax.text(0.42, 2.17, "sm", fontsize = 6.5, bbox=dict(facecolor='#C9C9C9', edgecolor = 'none', boxstyle='square,pad=-0.1'))
ax.text(0.20, 1.89, "MD", fontsize = 6.5, bbox=dict(facecolor='#C9C9C9', edgecolor = 'none', boxstyle='square,pad=-0.1'))
ax.text(-0.06, 1.58, "PV", fontsize = 6.5, bbox=dict(facecolor='#C9C9C9', edgecolor = 'none', boxstyle='square,pad=-0.1'))
ax.text(0.4, 1.5, "IAD", fontsize = 6.5, bbox=dict(facecolor='#C9C9C9', edgecolor = 'none', boxstyle='square,pad=-0.1'), rotation = 52)
return
suB = subplot(gsD[0,0])
imshow(carte_adrien, extent = bound_adrien, interpolation = 'bessel', aspect = 'equal')
i = 1
m = 'Mouse17'
tmp2 = headdir
tmp2[tmp2<0.05] = 0.0
scatter(new_xy_shank[:,0], new_xy_shank[:,1], s = 2, color = 'black', marker = '.',
alpha = 1.0, linewidths = 0.5, label = 'shank position')
scatter(new_xy_shank[:,0], new_xy_shank[:,1], s = tmp2*7., label = 'HD cell position',
color = 'red', marker = 'o', alpha = 0.6)
plot([2.2,2.2],[0,1], '-', linewidth = 1.3, color = 'black')
suB.text(2.25, 0.5, "1 mm", rotation = -90)
# show_labels(suB)
leg = legend(loc = 'lower left', fontsize = 7, framealpha=1.0, bbox_to_anchor=(0.0, -0.09)) #, title = 'HD recording sites', )
noaxis(suB)
leg.get_title().set_fontsize(7)
leg.get_frame().set_facecolor('white')
annotate('Anterodorsal (AD)', xy=(0.9,2.4), xytext=(0.9,2.7), xycoords='data', textcoords='data',
arrowprops=dict(facecolor='black',
shrink=0.05,
headwidth=3,
headlength=2,
width=0.3),
fontsize = 7, ha = 'center', va = 'bottom')
suB.text(-0.20, 1.24, "d", transform = suB.transAxes, fontsize = 10, fontweight = 'bold')
# pair = ['Mouse17-130207_29', 'Mouse17-130211_20', 'Mouse17-130212_13']
# pair = ['Mouse17-130201_31', 'Mouse17-130218_8', 'Mouse17-130218_8']
# pair = ['Mouse17-130203_13', 'Mouse17-130211_27', 'Mouse17-130205_27']
pair = ['Mouse17-130129_3', 'Mouse17-130206_29', 'Mouse17-130212_23']
tricolor = ['#3e3e3e', '#7d7d7d', '#9e9e9e']
xy_pos = new_xy_shank.reshape(len(y), len(x), 2)
pos = space.loc[list(pair), ['session', 'shank']]
x = xy_pos[pos['session'],pos['shank'],0]
y = xy_pos[pos['session'],pos['shank'],1]
plot(x, y, linewidth = 1, color = 'black', zorder = 1)
scatter(x, y, edgecolors = 'white', c = tricolor, zorder = 2)
########################################################################
# D. EXEMPLES FAR AWAY
########################################################################
gsDD = gridspec.GridSpecFromSubplotSpec(2,1, subplot_spec=gsD[0,1], hspace = 0.6, wspace = 0.4) #, width_ratios=[0.6,0.6,0.6], hspace = 0.2, wspace = 0.2)#, height_ratios = [1,1,0.2,1])
# CORRELATION SWR
subplot(gsDD[0,0])
simpleaxis(gca())
plot(swr[pair[0]], color = tricolor[0], linewidth = lw)
plot(swr[pair[1]], color = tricolor[1], linewidth = lw)
plot(swr[pair[2]], color = tricolor[2], linewidth = lw)
xlabel("Time from SWRs (ms)", labelpad = -0.01)
ylabel("Modulation")
# CORRELATION AUTO
gs3 = gridspec.GridSpecFromSubplotSpec(1, 3, subplot_spec=gsDD[1,0], wspace = 0.5)
for j, ep in enumerate(['wake', 'rem', 'sws']):
subplot(gs3[0,j])
simpleaxis(gca())
title(titles[j], fontsize = 8, pad = 3)
tmp = store_autocorr[ep][pair]
tmp.loc[0] = 0.0
tmp1 = tmp.loc[:0].rolling(window=20,win_type='gaussian',center=True,min_periods=1).mean(std=3.0)
tmp2 = tmp.loc[0:].rolling(window=20,win_type='gaussian',center=True,min_periods=1).mean(std=3.0)
tmp = pd.concat([tmp1.loc[:-0.5],tmp2])
tmp.loc[0] = 0.0
plot(tmp.loc[-50:50,pair[0]], color = tricolor[0], linewidth = lw)
plot(tmp.loc[-50:50,pair[1]], color = tricolor[1], linewidth = lw)
plot(tmp.loc[-50:50,pair[2]], color = tricolor[2], linewidth = lw)
if j == 1:
xlabel("Time lag (ms)", labelpad = -0.0)
if j == 0:
ylabel("Autocorr.")
########################################################################
# E. PCA
########################################################################
neurontoplot = [neuron_seed]+neurons_to_plot
gsB = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec=gs[1,-1])#, width_ratios = [0.05, 0.95])#, hspace = 0.1, wspace = 0.5)#, height_ratios = [1,1,0.2,1])
# EXEMPLE PCA SWR
subplot(gsB[0,:])
simpleaxis(gca())
gca().spines['bottom'].set_visible(False)
gca().set_xticks([])
axhline(0, linewidth = 0.5, color = 'black')
for i, n in enumerate(neurontoplot):
idx = np.where(n == neurons)[0][0]
scatter(np.arange(pc_aut.shape[1])+i*0.2, pc_aut[idx], 2, color = color_ex[i])
for j in np.arange(pc_swr.shape[1]):
plot([j+i*0.2, j+i*0.2], [0, pc_aut[idx][j]], linewidth = 1.2, color = color_ex[i])
yticks([-4,0])
ylabel("PCA weights")
gca().yaxis.set_label_coords(-0.15,0.1)
# title("PCA")
gca().text(-0.2, 1.10, "e", transform = gca().transAxes, fontsize = 10, fontweight='bold')
gca().text(0.15, 0.95, "Autocorr.", transform = gca().transAxes, fontsize = 8)
# EXEMPLE PCA AUTOCORR
gsAA = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec=gsB[1,:], height_ratios = [0.4,1], hspace = 0.1)#, hspace = 0.1, height_ratios = [1,0.4])
ax1 = subplot(gsAA[0,:])
ax2 = subplot(gsAA[1,:], sharex = ax1)
simpleaxis(ax1)
simpleaxis(ax2)
ax1.spines['bottom'].set_visible(False)
# ax2.spines['bottom'].set_visible(False)
ax1.set_xticks([])
ax1.xaxis.set_tick_params(size=0)
ax2.set_xticks(np.arange(10))
ax2.set_xticklabels(np.arange(10)+1)
ax2.axhline(0, linewidth = 0.5, color = 'black')
for i, n in enumerate(neurontoplot):
idx = np.where(n == neurons)[0][0]
ax1.scatter(np.arange(pc_swr.shape[1])+i*0.2, pc_swr[idx], 2, color = color_ex[i])
ax2.scatter(np.arange(pc_swr.shape[1])+i*0.2, pc_swr[idx], 2, color = color_ex[i])
for j in np.arange(pc_aut.shape[1]):
ax1.plot([j+i*0.2, j+i*0.2],[0, pc_swr[idx][j]], linewidth = 1.2, color = color_ex[i])
ax2.plot([j+i*0.2, j+i*0.2],[0, pc_swr[idx][j]], linewidth = 1.2, color = color_ex[i])
ax2.set_xlabel("Components")
idx = [np.where(n == neurons)[0][0] for n in neurontoplot]
ax2.set_ylim(pc_swr[idx,0].min()-1, pc_swr[idx,1:].max()+0.6)
ax1.set_ylim(pc_swr[idx,0].max()-1, pc_swr[idx,0].max()+0.6)
ax1.set_yticks([13])
d = .005 # how big to make the diagonal lines in axes coordinates
kwargs = dict(transform=ax1.transAxes, color='k', clip_on=False, linewidth = 1)
ax1.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax1.text(0.2, 1.15, "SWRs", transform = ax1.transAxes, fontsize = 8)
# title("PCA")
###########################################################################
# F MATRIX CORRELATION
###########################################################################
gsC = gridspec.GridSpecFromSubplotSpec(1,2,subplot_spec=gs[2,0], wspace = 0.04)#, hspace = 0.1, wspace = 0.5)#, height_ratios = [1,1,0.2,1])
subplot(gsC[0,0])
noaxis(gca())
vmin = np.minimum(corr[0:10,10:].min(), corrsh[0:10,10:].min())
vmax = np.maximum(corr[0:10,10:].max(), corrsh[0:10,10:].max())
imshow(corr[0:10,10:], vmin = vmin, vmax = vmax)
ylabel("SWR")
xlabel("Autocorr.")
gca().text(0.25, 1.25, "Cell-by-cell correlation", transform = gca().transAxes, fontsize = 7)
gca().text(-0.35, 1.23, "f", transform = gca().transAxes, fontsize = 10, fontweight='bold')
gca().text(0.1, -0.45, r"$\rho^{2} = $"+str(np.round(1-np.linalg.det(corr),2)), transform = gca().transAxes, fontsize = 7)
title("Actual", fontsize = 8, pad = 2)
# MATRIX SHUFFLED
subplot(gsC[0,1])
noaxis(gca())
imshow(corrsh[0:10,10:], vmin = vmin, vmax = vmax)
title("Shuffle", fontsize = 8, pad = 2)
# ylabel("SWR")
# xlabel("Autocorr.")
gca().text(0.15, -0.45, r"$\rho^{2} = $"+str(np.round(1-np.linalg.det(corrsh),2)), transform = gca().transAxes, fontsize = 7)
#########################################################################
# G. SHUFFLING + CORR
#########################################################################
subplot(gs[2,1])
simpleaxis(gca())
axvline(1-det_all['all'], color = 'red')
hist(1-shufflings['all'], 100, color = 'black', weights = np.ones(len(shufflings['all']))/len(shufflings['all']), label = 'All', histtype='stepfilled')
hist(1-shuffl_shank, 100, color = 'grey', alpha = 0.7, weights = np.ones(len(shuffl_shank))/len(shuffl_shank), label = 'Nearby', histtype='stepfilled')
xlabel(r"Total correlation $\rho^{2}$")
ylabel("Probability (%)")
yticks([0,0.02,0.04], ['0','2','4'])
gca().text(-0.23, 1.08, "g", transform = gca().transAxes, fontsize = 10, fontweight='bold')
gca().text(1-det_all['all']-0.05, gca().get_ylim()[1], "p<0.001",fontsize = 7, ha = 'center', color = 'red')
legend(edgecolor = None, facecolor = None, frameon = False, loc = 'lower left', bbox_to_anchor = (0.35, 0.6))
#########################################################################
# H. CONTROL 1
#########################################################################
# subplot(gs[2,2])
# simpleaxis(gca())
gsG = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec=gs[2,2], hspace = 0.4)#, hspace = 0.1, wspace = 0.5)#, height_ratios = [1,1,0.2,1])
store = pd.HDFStore("../../figures/figures_articles_v2/figure6/determinant_corr_noSWS.h5", 'r')
det_all = store['det_all']
shufflings = store['shufflings']
store.close()
subplot(gsG[0,0])
simpleaxis(gca())
gca().text(-0.22, 1.2, "h", transform = gca().transAxes, fontsize = 9, fontweight='bold')
# colors = ['blue', 'red', 'green']
colors = ["#CA3242","#849FAD", "#27647B", "#57575F"]
labels = ['WAKE', 'REM', 'NREM']
offset = [0.0265, 0.0265, 0.011]
for i, k in enumerate(['wak', 'rem', 'sws']):
shuf, x = np.histogram(1-shufflings[k], bins = 100, weights = np.ones(len(shufflings[k]))/len(shufflings[k]))
axvline(1-det_all[k], color = colors[i])
# plot([1-det_all[k], 1-det_all[k]], [0, 0.032], color = colors[i], label = labels[i])
plot(x[0:-1]+np.diff(x), shuf, color = colors[i], alpha = 0.7)
# hist(, label = k, histtype='stepfilled', facecolor = 'None', edgecolor = colors[i])
# gca().text(1-det_all[k], gca().get_ylim()[1], "p<0.001",fontsize = 7, ha = 'center', color = 'red')
gca().text(1-det_all[k], offset[i], labels[i], ha = 'center', fontsize = 7, bbox = dict(facecolor='white', edgecolor=colors[i],boxstyle='square,pad=0.2'))
axvline(0.33, color = 'black', linestyle = '--')
gca().text(0.33, 0.025, 'ALL', ha = 'center', fontsize = 7, bbox = dict(facecolor='white', edgecolor='black',boxstyle='square,pad=0.2'))
ylim(0, 0.035)
# xlabel(r"Total correlation $\rho^{2}$")
ylabel("P (%)")
yticks([0,0.01,0.02,0.03], ['0','1','2','3'])
# gca().text(-0.15, 1.0, "A", transform = gca().transAxes, fontsize = 9)
legend(edgecolor = None, facecolor = None, frameon = False, loc = 'lower left', bbox_to_anchor = (0.35, 0.6))
# #########################################################################
# # I CONTROL 2
# #########################################################################
store = pd.HDFStore("../../figures/figures_articles_v2/figure6/determinant_corr_noSWS_shank_shuffled.h5", 'r')
det_all = store['det_all']
shufflings = store['shufflings']
store.close()
subplot(gsG[1,0])
simpleaxis(gca())
gca().text(-0.22, 1.2, "i", transform = gca().transAxes, fontsize = 9, fontweight='bold')
# colors = ['blue', 'red', 'green']
labels = ['WAKE', 'REM', 'NREM']
offset = [0.125, 0.125, 0.06]
for i, k in enumerate(['wak', 'rem', 'sws']):
shuf, x = np.histogram(1-shufflings[k], bins = 20, weights = np.ones(len(shufflings[k]))/len(shufflings[k]))
axvline(1-det_all[k], color = colors[i])
# plot([1-det_all[k], 1-det_all[k]], [0, 0.032], color = colors[i], label = labels[i])
plot(x[0:-1]+np.diff(x), shuf, color = colors[i], alpha = 1)
# hist(, label = k, histtype='stepfilled', facecolor = 'None', edgecolor = colors[i])
# gca().text(1-det_all[k], gca().get_ylim()[1], "p<0.001",fontsize = 7, ha = 'center', color = 'red')
gca().text(1-det_all[k], offset[i], labels[i], ha = 'center', fontsize = 7, bbox = dict(facecolor='white', edgecolor=colors[i],boxstyle='square,pad=0.2'))
axvline(0.33, color = 'black', linestyle = '--')
gca().text(0.33, 0.12, 'ALL', ha = 'center', fontsize = 7, bbox = dict(facecolor='white', edgecolor='black',boxstyle='square,pad=0.2'))
ylim(0, 0.16)
xlabel(r"Total correlation $\rho^{2}$")
ylabel("P (%)")
yticks([0,0.05,0.10,0.15], ['0','5','10','15'])
# gca().text(-0.15, 1.0, "A", transform = gca().transAxes, fontsize = 9)
legend(edgecolor = None, facecolor = None, frameon = False, loc = 'lower left', bbox_to_anchor = (0.35, 0.6))
store = pd.HDFStore("../../figures/figures_articles_v2/figure6/determinant_corr.h5", 'r')
det_all = store['det_all']
shufflings = store['shufflings']
shuffl_shank = store['shuffling_shank']
store.close()
shuf, x = np.histogram(1-shuffl_shank, bins = 20, weights = np.ones(len(shuffl_shank))/len(shuffl_shank))
plot(x[0:-1]+np.diff(x), shuf, '--', color = colors[-1], alpha = 0.7)
subplots_adjust(top = 0.98, bottom = 0.06, right = 0.99, left = 0.06)
savefig("../../figures/figures_articles_v4/figart_4.pdf", dpi = 900, facecolor = 'white')
os.system("evince ../../figures/figures_articles_v4/figart_4.pdf &")
|
gviejo/ThalamusPhysio
|
python/figure_article_v4/main_article_v4_fig_4.py
|
Python
|
gpl-3.0
| 26,031
|
[
"Gaussian"
] |
9b4be43838ce34a50430a045ce9be157afb3bb66f773a780802226ff0c009e62
|
#! /usr/bin/env python
"""A module for authenticating against and communicating with selected
parts of the Garmin Connect REST API.
"""
import json
import logging
import os
import re
import requests
from StringIO import StringIO
import sys
import zipfile
import dateutil.parser
import os.path
from functools import wraps
#
# Note: For more detailed information about the API services
# used by this module, log in to your Garmin Connect account
# through the web browser and visit the API documentation page
# for the REST service of interest. For example:
# https://connect.garmin.com/proxy/activity-service-1.3/index.html
# https://connect.garmin.com/proxy/activity-search-service-1.2/index.html
#
#
# Other useful references:
# https://github.com/cpfair/tapiriik/blob/master/tapiriik/services/GarminConnect/garminconnect.py
# https://forums.garmin.com/showthread.php?72150-connect-garmin-com-signin-question/page2
#
log = logging.getLogger(__name__)
# reduce logging noise from requests library
logging.getLogger("requests").setLevel(logging.ERROR)
def require_session(client_function):
"""Decorator that is used to annotate :class:`GarminClient`
methods that need an authenticated session before being called.
"""
@wraps(client_function)
def check_session(*args, **kwargs):
client_object = args[0]
if not client_object.session:
raise Exception("Attempt to use GarminClient without being connected. Call connect() before first use.'")
return client_function(*args, **kwargs)
return check_session
class GarminClient(object):
"""A client class used to authenticate with Garmin Connect and
extract data from the user account.
    Since this class implements the context manager protocol, this object
    is best used in a with-statement. This will
automatically take care of logging in to Garmin Connect before any
further interactions and logging out after the block completes or
a failure occurs.
Example of use: ::
with GarminClient("my.sample@sample.com", "secretpassword") as client:
ids = client.list_activity_ids()
for activity_id in ids:
gpx = client.get_activity_gpx(activity_id)
"""
def __init__(self, username, password):
"""Initialize a :class:`GarminClient` instance.
:param username: Garmin Connect user name or email address.
:type username: str
:param password: Garmin Connect account password.
:type password: str
"""
self.username = username
self.password = password
self.session = None
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.disconnect()
def connect(self):
self.session = requests.Session()
self._authenticate()
def disconnect(self):
if self.session:
self.session.close()
self.session = None
def _authenticate(self):
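        # Three-step SSO flow: fetch the login form to obtain a flow execution
        # key, post the credentials to get a service ticket, then hit the
        # returned validation URL to establish the authenticated session.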
log.info("authenticating user ...")
params = {
"service": "http://connect.garmin.com/post-auth/login",
"clientId": "GarminConnect",
"consumeServiceTicket": "false"
}
flow_execution_key = self._get_flow_execution_key(params)
log.debug("flow execution key: '{}'".format(flow_execution_key))
validation_url = self._get_auth_ticket(flow_execution_key, params)
log.debug("auth ticket validation url: {}".format(validation_url))
self._validate_auth_ticket(validation_url)
# referer seems to be a header that is required by the REST API
self.session.headers.update({'Referer': "https://some.random.site"})
def _get_flow_execution_key(self, request_params):
log.debug("get flow execution key ...")
response = self.session.get(
"https://sso.garmin.com/sso/login", params=request_params)
# parse out flowExecutionKey
flow_execution_key = re.search(
r'name="lt"\s+value="([^"]+)"', response.text).groups(1)[0]
return flow_execution_key
def _get_auth_ticket(self, flow_execution_key, request_params):
data = {
"username": self.username, "password": self.password,
"_eventId": "submit", "embed": "true", "lt": flow_execution_key
}
log.debug("single sign-on ...")
sso_response = self.session.post(
"https://sso.garmin.com/sso/login",
params=request_params, data=data, allow_redirects=False)
# response must contain an SSO ticket
ticket_match = re.search("ticket=([^']+)'", sso_response.text)
if not ticket_match:
raise ValueError("failed to get authentication ticket: "
"did you enter valid credentials?")
ticket = ticket_match.group(1)
log.debug("SSO ticket: {}".format(ticket))
# response should contain a URL where auth ticket can be validated
validation_url = re.search(
r"response_url\s+=\s+'([^']+)'", sso_response.text)
validation_url = validation_url.group(1)
return validation_url
def _validate_auth_ticket(self, validation_url):
log.debug("validating authentication ticket ...")
response = self.session.get(validation_url, allow_redirects=True)
if response.status_code == 200 or response.status_code == 404:
# for some reason a 404 response code can also denote a
# successful auth ticket validation
return
raise Exception(
u"failed to validate authentication ticket: {}:\n{}".format(
response.status_code, response.text))
@require_session
def list_activities(self):
"""Return all activity ids stored by the logged in user, along
with their starting timestamps.
:returns: The full list of activity identifiers, timestamps, and stationary flags (indicates no GPS/time track)
:rtype: list of (int, datetime, bool) tuples
"""
ids = []
batch_size = 100
# fetch in batches since the API doesn't allow more than a certain
# number of activities to be retrieved on every invocation
for start_index in xrange(0, sys.maxint, batch_size):
next_batch = self._fetch_activities(start_index, batch_size)
if not next_batch:
break
ids.extend(next_batch)
return ids
@require_session
def _fetch_activities(self, start_index, max_limit=100):
"""Return a sequence of activity info starting at a given index,
with index 0 being the user's most recently registered activity.
Should the index be out of bounds or the account empty, an empty
list is returned.
:param start_index: The index of the first activity to retrieve.
:type start_index: int
:param max_limit: The (maximum) number of activities to retrieve.
:type max_limit: int
:returns: A list of activity identifiers, timestamps, and stationary flags (indicates no GPS/time track)
:rtype: list of (int, datetime, bool) tuples
"""
log.info("fetching activities {} through {} ...".format(
start_index, start_index+max_limit-1))
response = self.session.get(
"https://connect.garmin.com/proxy/activity-search-service-1.2/json/activities", params={"start": start_index, "limit": max_limit})
if response.status_code != 200:
raise Exception(
u"failed to fetch activities {} to {} types: {}\n{}".format(
start_index, (start_index+max_limit-1),
response.status_code, response.text))
results = json.loads(response.text)["results"]
if not "activities" in results:
# index out of bounds or empty account
return []
entries = [ (int(entry["activity"]["activityId"]),
dateutil.parser.parse(entry["activity"]["activitySummary"]["BeginTimestamp"]["value"]),
# https://github.com/cpfair/tapiriik/blob/master/tapiriik/services/GarminConnect/garminconnect.py#L292
"SumSampleCountSpeed" not in entry["activity"]["activitySummary"] and "SumSampleCountTimestamp" not in entry["activity"]["activitySummary"])
for entry in results["activities"] ]
log.debug("got {} activities.".format(len(entries)))
return entries
@require_session
    def get_activity_summary(self, activity_id):
        """Return a summary about a given activity. The summary contains
        several statistics, such as duration, GPS starting point, GPS end
        point, elevation gain, max heart rate, max pace, and max speed.
:param activity_id: Activity identifier.
:type activity_id: int
:returns: The activity summary as a JSON dict.
:rtype: dict
"""
response = self.session.get("https://connect.garmin.com/proxy/activity-service-1.3/json/activity/{}".format(activity_id))
if response.status_code != 200:
raise Exception(u"failed to fetch activity {}: {}\n{}".format(
activity_id, response.status_code, response.text))
return json.loads(response.text)
@require_session
def get_activity_details(self, activity_id):
"""Return a JSON representation of a given activity including
available measurements such as location (longitude, latitude),
heart rate, distance, pace, speed, elevation.
:param activity_id: Activity identifier.
:type activity_id: int
:returns: The activity details as a JSON dict.
:rtype: dict
"""
# mounted at xml or json depending on result encoding
response = self.session.get("https://connect.garmin.com/proxy/activity-service-1.3/json/activityDetails/{}".format(activity_id))
if response.status_code != 200:
raise Exception(u"failed to fetch activity details for {}: {}\n{}".format(
activity_id, response.status_code, response.text))
return json.loads(response.text)
@require_session
def get_activity_gpx(self, activity_id):
"""Return a GPX (GPS Exchange Format) representation of a
given activity. If the activity cannot be exported to GPX
(not yet observed in practice, but that doesn't exclude the
possibility), a :obj:`None` value is returned.
:param activity_id: Activity identifier.
:type activity_id: int
:returns: The GPX representation of the activity as an XML string
or ``None`` if the activity couldn't be exported to GPX.
:rtype: str
"""
response = self.session.get("https://connect.garmin.com/proxy/activity-service-1.3/gpx/course/{}".format(activity_id))
# An alternate URL that seems to produce the same results
# and is the one used when exporting through the Garmin
# Connect web page.
#response = self.session.get("https://connect.garmin.com/proxy/activity-service-1.1/gpx/activity/{}?full=true".format(activity_id))
if response.status_code == 404:
return None
if response.status_code != 200:
raise Exception(u"failed to fetch GPX for activity {}: {}\n{}".format(
activity_id, response.status_code, response.text))
return response.text
@require_session
def get_activity_tcx(self, activity_id):
"""Return a TCX (Training Center XML) representation of a
given activity. If the activity doesn't have a TCX source (for
example, if it was originally uploaded in GPX format, Garmin
won't try to synthesize a TCX file) a :obj:`None` value is
returned.
:param activity_id: Activity identifier.
:type activity_id: int
:returns: The TCX representation of the activity as an XML string
or ``None`` if the activity cannot be exported to TCX.
:rtype: str
"""
response = self.session.get("https://connect.garmin.com/proxy/activity-service-1.1/tcx/activity/{}?full=true".format(activity_id))
if response.status_code == 404:
return None
if response.status_code != 200:
raise Exception(u"failed to fetch TCX for activity {}: {}\n{}".format(
activity_id, response.status_code, response.text))
return response.text
def get_original_activity(self, activity_id):
"""Return the original file that was uploaded for an activity.
If the activity doesn't have any file source (for example,
if it was entered manually rather than imported from a Garmin
device) then :obj:`(None,None)` is returned.
:param activity_id: Activity identifier.
:type activity_id: int
:returns: A tuple of the file type (e.g. 'fit', 'tcx', 'gpx') and
its contents, or :obj:`(None,None)` if no file is found.
:rtype: (str, str)
"""
response = self.session.get("https://connect.garmin.com/proxy/download-service/files/activity/{}".format(activity_id))
if response.status_code == 404:
# Manually entered activity, no file source available
return (None,None)
if response.status_code != 200:
raise Exception(
u"failed to get original activity file {}: {}\n{}".format(
activity_id, response.status_code, response.text))
# return the first entry from the zip archive where the filename is
# activity_id (should be the only entry!)
zip = zipfile.ZipFile(StringIO(response.content), mode="r")
for path in zip.namelist():
fn, ext = os.path.splitext(path)
if fn==str(activity_id):
return ext[1:], zip.open(path).read()
return (None,None)
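    # Illustrative usage (a sketch; ``client`` and the activity id are
    # hypothetical): save the originally uploaded file in whatever format
    # Garmin Connect stored it.
    #
    #   fmt, content = client.get_original_activity(123456789)
    #   if fmt is not None:
    #       with open("activity.{}".format(fmt), "wb") as f:
    #           f.write(content)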
def get_activity_fit(self, activity_id):
"""Return a FIT representation for a given activity. If the activity
doesn't have a FIT source (for example, if it was entered manually
rather than imported from a Garmin device) a :obj:`None` value is
returned.
:param activity_id: Activity identifier.
:type activity_id: int
:returns: A string with a FIT file for the activity or :obj:`None`
if no FIT source exists for this activity (e.g., entered manually).
:rtype: str
"""
fmt, orig_file = self.get_original_activity(activity_id)
# if the file extension of the original activity file isn't 'fit',
# this activity was uploaded in a different format (e.g. gpx/tcx)
# and cannot be exported to fit
return orig_file if fmt=='fit' else None
@require_session
def upload_activity(self, file, format=None, name=None, description=None, activity_type=None, private=None):
"""Upload a GPX, TCX, or FIT file for an activity.
:param file: Path or open file
:param format: File format (gpx, tcx, or fit); guessed from filename if None
:param name: Optional name for the activity on Garmin Connect
:param description: Optional description for the activity on Garmin Connect
:param activity_type: Optional activityType key (lowercase: e.g. running, cycling)
        :param private: If true, then the activity will be set as private.
:returns: ID of the newly-uploaded activity
:rtype: int
"""
if isinstance(file, basestring):
file = open(file, "rb")
# guess file type if unspecified
fn = os.path.basename(file.name)
_, ext = os.path.splitext(fn)
if format is None:
if ext.lower() in ('.gpx','.tcx','.fit'):
format = ext.lower()[1:]
else:
raise Exception(u"could not guess file type for {}".format(fn))
# upload it
files = dict(data=(fn, file))
response = self.session.post("https://connect.garmin.com/proxy/upload-service-1.1/json/upload/.{}".format(format),
files=files)
# check response and get activity ID
if response.status_code != 200:
raise Exception(u"failed to upload {} for activity: {}\n{}".format(
format, response.status_code, response.text))
res = response.json()["detailedImportResult"]
if len(res["successes"])<1:
if res["failures"][0]["messages"] and res["failures"][0]["messages"][0]["content"].startswith("Duplicate"):
activity_id = res["failures"][0]["internalId"]
log.info("uploaded activity is a duplicate of {}".format(activity_id))
else:
raise Exception(u"failed to upload {} for activity: {}".format(format, str(res["failures"])))
elif len(res["successes"])>1:
raise Exception(u"uploaded {} file contained multiple activities".format(format))
else:
activity_id = res["successes"][0]["internalId"]
# add optional fields
fields = ( ('name',name,("display","value")),
('description',description,("display","value")),
('type',activity_type,("activityType","key")),
('privacy','private' if private else None,("definition","key")) )
for endpoint, value, path in fields:
if value is not None:
response = self.session.post("https://connect.garmin.com/proxy/activity-service-1.2/json/{}/{}".format(endpoint, activity_id),
data={'value':value})
if response.status_code != 200:
raise Exception(u"failed to set {} for activity {}: {}\n{}".format(
endpoint, activity_id, response.status_code, response.text))
j = response.json()
p0, p1 = path
if p0 not in j or j[p0][p1] != value:
raise Exception(u"failed to set {} for activity {}\n".format(endpoint, activity_id))
return activity_id
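    # Illustrative usage (a sketch; the file name and metadata values are
    # hypothetical):
    #
    #   activity_id = client.upload_activity(
    #       "morning_run.fit", name="Morning run", activity_type="running")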
|
dlenski/garminexport
|
garminexport/garminclient.py
|
Python
|
apache-2.0
| 18,466
|
[
"VisIt"
] |
99e82cfbb49fbca7740bb1f199dcd69b55b2f4911c5ef022b27d778b721d3f6d
|
# -*- coding: utf-8 -*-
"""
Main FISSA user interface.
Authors:
- Sander W Keemink <swkeemink@scimail.eu>
- Scott C Lowe <scott.code.lowe@gmail.com>
"""
from __future__ import print_function
import collections
import datetime
import functools
import glob
import itertools
import os.path
import sys
import time
import warnings
try:
import collections.abc as cabc
except ImportError:
import collections as cabc
import numpy as np
from joblib import Parallel, delayed
from past.builtins import basestring
from scipy.io import savemat
from tqdm.auto import tqdm
from . import deltaf, extraction
from . import neuropil as npil
from . import roitools
def _pretty_timedelta(td=None, **kwargs):
"""
Represent a difference in time as a human-readable string.
Parameters
----------
td : datetime.timedelta, optional
The amount of time elapsed.
**kwargs
Additional arguments as per :class:`datetime.timedelta` constructor.
Returns
-------
str
Representation of the amount of time elapsed.
"""
if td is None:
td = datetime.timedelta(**kwargs)
elif not isinstance(td, datetime.timedelta):
raise ValueError(
"First argument should be a datetime.timedelta instance,"
" but {} was given.".format(type(td))
)
elif kwargs:
raise ValueError(
"Either a timedelta object or its arguments should be given, not both."
)
if td.total_seconds() < 2:
return "{:.3f} seconds".format(td.total_seconds())
if td.total_seconds() < 10:
return "{:.2f} seconds".format(td.total_seconds())
if td.total_seconds() < 60:
return "{:.1f} seconds".format(td.total_seconds())
if td.total_seconds() < 3600:
s = td.total_seconds()
m = int(s // 60)
s -= m * 60
return "{:d} min, {:.0f} sec".format(m, s)
# For durations longer than one hour, we use the default string
# representation for a datetime.timedelta, H:MM:SS.microseconds
return str(td)
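# A few example renderings of the branches above (the values follow directly
# from the thresholds in _pretty_timedelta):
#
#   _pretty_timedelta(seconds=1.5)  -> "1.500 seconds"
#   _pretty_timedelta(seconds=42)   -> "42.0 seconds"
#   _pretty_timedelta(seconds=75)   -> "1 min, 15 sec"
#   _pretty_timedelta(hours=2)      -> "2:00:00"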
def extract(
image,
rois,
nRegions=4,
expansion=1,
datahandler=None,
verbosity=1,
label=None,
total=None,
):
r"""
Extract data for all ROIs in a single 3d array or TIFF file.
.. versionadded:: 1.0.0
Parameters
----------
image : str or :term:`array_like` shaped ``(time, height, width)``
The imaging data.
Either a path to a multipage TIFF file, or 3d :term:`array_like` data.
rois : str or :term:`list` of :term:`array_like`
The regions-of-interest, specified by
either a string containing a path to an ImageJ roi zip file,
        or a list of arrays encoding polygons, or a list of binary arrays
representing masks.
nRegions : int, default=4
Number of neuropil regions to draw. Use a higher number for
densely labelled tissue. Default is ``4``.
expansion : float, default=1
Expansion factor for the neuropil region, relative to the
ROI area. Default is ``1``. The total neuropil area will be
``nRegions * expansion * area(ROI)``.
datahandler : fissa.extraction.DataHandlerAbstract, optional
A datahandler object for handling ROIs and calcium data.
The default is :class:`~fissa.extraction.DataHandlerTifffile`.
verbosity : int, default=1
Level of verbosity. The options are:
- ``0``: No outputs.
- ``1``: Print extraction start.
- ``2``: Print extraction end.
- ``3``: Print start of each step within the extraction process.
label : str or int, optional
The label for the current trial. Only used for reporting progress.
total : int, optional
Total number of trials. Only used for reporting progress.
Returns
-------
traces : :class:`numpy.ndarray` shaped ``(n_rois, nRegions + 1, n_frames)``
        The raw signal, determined as the average fluorescence trace extracted
from each ROI and neuropil region.
Each vector ``traces[i_roi, 0, :]`` contains the traces for the
``i_roi``-th ROI.
The following `nRegions` arrays in ``traces[i_roi, 1 : nRegions + 1, :]``
contain the traces from the `nRegions` grown neuropil regions
surrounding the ``i_roi``-th ROI.
polys : list of list of list of :class:`numpy.ndarray` shaped ``(n_nodes, 2)``
Polygon contours describing the outline of each region.
For contiguous ROIs, the outline of the ``i_roi``-th ROI is described
by the array at ``polys[i_roi][0][0]``. This array is ``n_nodes``
rows, each representing the coordinate of a node in ``(y, x)`` format.
For non-contiguous ROIs, a contour is needed for each disconnected
polygon making up the total aggregate ROI. These contours are found at
``polys[i_roi][0][i_contour]``.
Similarly, the `nRegions` neuropil regions are each described by the
        polygons ``polys[i_roi][i_neuropil + 1][i_contour]``, respectively.
mean : :class:`numpy.ndarray` shaped (height, width)
Mean image.
"""
# Get the timestamp for program start
t0 = time.time()
mheader = ""
if verbosity >= 1:
# Set up message header
# Use the label, if this was provided
if label is None:
header = ""
elif isinstance(label, int) and isinstance(total, int):
# Pad left based on the total number of jobs, so it is [ 1/10] etc
fmtstr = "{:" + str(int(np.maximum(1, np.ceil(np.log10(total))))) + "d}"
header = fmtstr.format(label + 1)
else:
header = str(label)
        # Try to label with [1/5] to indicate progress, if possible
if header and total is not None:
header += "/{}".format(total)
if header:
header = "[Extraction " + header + "] "
# Try to include the path to the image as a footer
footer = ""
if isinstance(image, basestring):
# Include the image path as a footer
footer = " ({})".format(image)
# Done with header and footer
# Inner header is indented further
mheader = " " + header
# Build intro message
message = header + "Extraction starting" + footer
# Wait briefly to prevent messages colliding when using multiprocessing
if isinstance(label, int) and label < 12:
time.sleep(label / 50.0)
print(message)
sys.stdout.flush()
if datahandler is None:
datahandler = extraction.DataHandlerTifffile()
# get data as arrays and rois as masks
if verbosity >= 3:
print("{}Loading imagery".format(mheader))
curdata = datahandler.image2array(image)
if verbosity >= 3:
print("{}Converting ROIs to masks".format(mheader))
base_masks = datahandler.rois2masks(rois, curdata)
# get the mean image
mean = datahandler.getmean(curdata)
if verbosity == 3:
print("{}Growing neuropil regions and extracting traces".format(mheader))
# Initialise output variables
traces = []
polys = []
# get neuropil masks and extract signals
for base_mask in tqdm(
base_masks,
total=len(base_masks),
desc="{}Neuropil extraction".format(mheader),
disable=verbosity < 4,
):
# neuropil masks
npil_masks = roitools.getmasks_npil(
base_mask, nNpil=nRegions, expansion=expansion
)
# add all current masks together
masks = [base_mask] + npil_masks
# extract traces
traces.append(datahandler.extracttraces(curdata, masks))
# store ROI outlines
polys.append([roitools.find_roi_edge(mask) for mask in masks])
# Convert traces from a list to a single numpy array
traces = np.stack(traces, axis=0)
if verbosity >= 2:
# Build end message
message = header + "Extraction finished" + footer
message += " in {}".format(_pretty_timedelta(seconds=time.time() - t0))
print(message)
sys.stdout.flush()
return traces, polys, mean
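# Illustrative usage of extract() (a sketch; the file names are hypothetical):
#
#   traces, polys, mean = extract("trial01.tif", "rois.zip", nRegions=4)
#   roi0_trace = traces[0, 0, :]      # raw trace of the first ROI
#   roi0_npil = traces[0, 1:, :]      # its four surrounding neuropil traces
#   roi0_outline = polys[0][0][0]     # (n_nodes, 2) contour of the first ROI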
def separate_trials(
raw,
alpha=0.1,
max_iter=20000,
tol=1e-4,
max_tries=1,
method="nmf",
verbosity=1,
label=None,
total=None,
):
r"""
Separate signals within a set of 2d arrays.
.. versionadded:: 1.0.0
Parameters
----------
raw : list of n_trials :term:`array_like`, each shaped ``(nRegions + 1, observations)``
Raw signals.
A list of 2-d arrays, each of which contains observations of mixed
signals, mixed in the same way across all trials.
The `nRegions` signals must be the same for each trial, and the 0-th
region, ``raw[trial][0]``, should be from the region of interest for
which a matching source signal should be identified.
alpha : float, default=0.1
        Sparsity regularization weight for the NMF algorithm. Set to zero to
remove regularization. Default is ``0.1``.
(Only used for ``method="nmf"``.)
max_iter : int, default=20000
Maximum number of iterations before timing out on an attempt.
tol : float, default=1e-4
Tolerance of the stopping condition.
max_tries : int, default=1
Maximum number of random initial states to try. Each random state will
be optimized for `max_iter` iterations before timing out.
method : {"nmf", "ica"}, default="nmf"
Which blind source-separation method to use. Either ``"nmf"``
for non-negative matrix factorization, or ``"ica"`` for
independent component analysis. Default is ``"nmf"``.
verbosity : int, default=1
Level of verbosity. The options are:
- ``0``: No outputs.
- ``1``: Print separation start.
- ``2``: Print separation end.
- ``3``: Print progress details during separation.
label : str or int, optional
Label/name or index of the ROI currently being processed.
Only used for progress messages.
total : int, optional
Total number of ROIs. Only used for reporting progress.
Returns
-------
Xsep : list of n_trials :class:`numpy.ndarray`, each shaped ``(nRegions + 1, observations)``
The separated signals, unordered.
Xmatch : list of n_trials :class:`numpy.ndarray`, each shaped ``(nRegions + 1, observations)``
The separated traces, ordered by matching score against the raw ROI
signal.
Xmixmat : :class:`numpy.ndarray`, shaped ``(nRegions + 1, nRegions + 1)``
Mixing matrix.
convergence : dict
Metadata for the convergence result, with the following keys and
values:
converged : bool
Whether the separation model converged, or if it ended due to
reaching the maximum number of iterations.
iterations : int
The number of iterations which were needed for the separation model
to converge.
max_iterations : int
Maximum number of iterations to use when fitting the
separation model.
random_state : int or None
Random seed used to initialise the separation model.
"""
# Get the timestamp for program start
t0 = time.time()
header = ""
if verbosity >= 1:
# Set up message header
# Use the label, if this was provided
if label is None:
header = ""
elif isinstance(label, int) and isinstance(total, int):
# Pad left based on the total number of jobs, so it is [ 1/10] etc
fmtstr = "{:" + str(int(np.maximum(1, np.ceil(np.log10(total))))) + "d}"
header = fmtstr.format(label + 1)
else:
header = str(label)
        # Try to label with [1/5] to indicate progress, if possible
if header and total is not None:
header += "/{}".format(total)
if header:
header = "[Separation " + header + "] "
# Include the ROI label as a footer
footer = ""
if isinstance(label, int) and isinstance(total, int):
# Include the ROI label as a footer
footer = " (ROI {})".format(label)
# Done with header and footer
# Build intro message
message = header + "Signal separation starting" + footer
# Wait briefly to prevent messages colliding when using multiprocessing
if isinstance(label, int) and label < 12:
time.sleep(label / 50.0)
print(message)
sys.stdout.flush()
# Join together the raw data across trials, collapsing down the trials
X = np.concatenate(raw, axis=1)
# Check for values below 0
if X.min() < 0:
message_extra = ""
if label is not None:
message_extra = " for ROI {}".format(label)
warnings.warn(
"{}Found values below zero in raw signal{}. Offsetting so minimum is 0."
"".format(header, message_extra)
)
X -= X.min()
# Separate the signals
Xsep, Xmatch, Xmixmat, convergence = npil.separate(
X,
method,
max_iter=max_iter,
tol=tol,
max_tries=max_tries,
alpha=alpha,
verbosity=verbosity - 2,
prefix=" " + header,
)
# Unravel observations from multiple trials into a list of arrays
trial_lengths = [r.shape[1] for r in raw]
indices = np.cumsum(trial_lengths[:-1])
Xsep = np.split(Xsep, indices, axis=1)
Xmatch = np.split(Xmatch, indices, axis=1)
# Report status
if verbosity >= 2:
# Build end message
message = header + "Signal separation finished" + footer
message += " in {}".format(_pretty_timedelta(seconds=time.time() - t0))
print(message)
sys.stdout.flush()
return Xsep, Xmatch, Xmixmat, convergence
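# Illustrative usage of separate_trials() (a sketch; ``raw_roi0`` stands for a
# list of per-trial arrays, each shaped (nRegions + 1, n_frames), for one ROI):
#
#   Xsep, Xmatch, Xmixmat, conv = separate_trials(raw_roi0, method="nmf")
#   decontaminated = Xmatch[0][0, :]    # best-matching source, first trial
#   print(conv["converged"], conv["iterations"])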
class Experiment:
r"""
FISSA Experiment.
Uses the methodology described in
`FISSA: A neuropil decontamination toolbox for calcium imaging signals <doi_>`_.
.. _doi: https://www.doi.org/10.1038/s41598-018-21640-2
Parameters
----------
images : str or list
The raw imaging data.
Should be one of:
- the path to a directory containing TIFF files (string),
- a list of paths to TIFF files (list of strings),
- a list of :term:`array_like` data already loaded into memory,
each shaped ``(n_frames, height, width)``.
Note that each TIFF or array is considered a single trial.
rois : str or list
The region of interest (ROI) definitions.
Should be one of:
- the path to a directory containing ImageJ ZIP files (string),
- the path of a single ImageJ ZIP file (string),
- a list of ImageJ ZIP files (list of strings),
        - a list of arrays, each encoding ROI polygons,
- a list of lists of binary arrays, each representing a ROI mask.
This can either be a single roiset for all trials, or a different
roiset for each trial.
folder : str, optional
Path to a cache directory from which pre-extracted data will
be loaded if present, and saved to otherwise. If `folder` is
unset, the experiment data will not be saved.
nRegions : int, default=4
Number of neuropil regions and signals to use. Default is ``4``.
Use a higher number for densely labelled tissue.
expansion : float, default=1
Expansion factor for each neuropil region, relative to the
ROI area. Default is ``1``. The total neuropil area will be
``nRegions * expansion * area(ROI)``.
method : "nmf" or "ica", default="nmf"
Which blind source-separation method to use. Either ``"nmf"``
for non-negative matrix factorization, or ``"ica"`` for
independent component analysis. Default is ``"nmf"`` (recommended).
alpha : float, default=0.1
        Sparsity regularization weight for the NMF algorithm. Set to zero to
remove regularization. Default is ``0.1``.
max_iter : int, default=20000
Maximum number of iterations before timing out on an attempt.
.. versionadded:: 1.0.0
tol : float, default=1e-4
Tolerance of the stopping condition.
.. versionadded:: 1.0.0
max_tries : int, default=1
Maximum number of random initial states to try. Each random state will
be optimized for `max_iter` iterations before timing out.
.. versionadded:: 1.0.0
ncores_preparation : int or None, default=-1
The number of parallel subprocesses to use during the data
preparation steps of :meth:`separation_prep`.
These steps are ROI and neuropil subregion definitions, and extracting
raw signals from TIFFs.
If set to ``None`` or ``-1`` (default), the number of processes used
will equal the number of threads on the machine.
If this is set to ``-2``, the number of processes used will be one less
than the number of threads on the machine; etc.
Note that the preparation process can be quite memory-intensive and it
may be necessary to reduce the number of processes from the default.
ncores_separation : int or None, default=-1
The number of parallel subprocesses to use during the signal
separation steps of :meth:`separate`.
If set to ``None`` or ``-1`` (default), the number of processes used
will equal the number of threads on the machine.
If this is set to ``-2``, the number of processes used will be one less
than the number of threads on the machine; etc.
The separation routine requires less memory per process than
        the preparation routine, and so `ncores_separation` can often be set
higher than `ncores_preparation`.
lowmemory_mode : bool, optional
If ``True``, FISSA will load TIFF files into memory frame-by-frame
instead of holding the entire TIFF in memory at once. This
option reduces the memory load, and may be necessary for very
large inputs. Default is ``False``.
datahandler : :class:`fissa.extraction.DataHandlerAbstract`, optional
A custom datahandler object for handling ROIs and calcium data can
be given here. See :mod:`fissa.extraction` for example datahandler
classes. The default datahandler is
:class:`~fissa.extraction.DataHandlerTifffile`.
If `datahandler` is set, the `lowmemory_mode` parameter is
ignored.
verbosity : int, default=1
How verbose the processing will be. Increase for more output messages.
Processing is silent if ``verbosity=0``.
.. versionadded:: 1.0.0
Attributes
----------
result : :class:`numpy.ndarray`
A :class:`numpy.ndarray` of shape ``(n_rois, n_trials)``, each element
of which is itself a :class:`numpy.ndarray` shaped
``(n_signals, n_timepoints)``.
The final output of FISSA, with separated signals ranked in order of
their weighting toward the raw cell ROI signal relative to their
weighting toward other mixed raw signals.
The ordering is such that ``experiment.result[roi, trial][0, :]``
is the signal with highest score in its contribution to the raw
neuronal signal.
Subsequent signals are sorted in order of diminishing score.
        The units are the same as `raw` (candelas per unit area).
This field is only populated after :meth:`separate` has been run; until
then, it is set to ``None``.
roi_polys : :class:`numpy.ndarray`
A :class:`numpy.ndarray` of shape ``(n_rois, n_trials)``, each element
of which is itself a list of length ``nRegions + 1``, each element of
which is a list of length ``n_contour`` containing a :class:`numpy.ndarray`
of shape ``(n_nodes, 2)``.
Polygon contours describing the outline of each region.
For contiguous ROIs, the outline of the ``i_roi``-th ROI used in the
``i_trial``-th trial is described by the array at
``experiment.roi_polys[i_roi, i_trial][0][0]``.
This array consists of ``n_nodes`` rows, each representing the
coordinate of a node in ``(y, x)`` format.
For non-contiguous ROIs, a contour is needed for each disconnected
polygon making up the total aggregate ROI. These contours are found at
``experiment.roi_polys[i_roi, i_trial][0][i_contour]``.
Similarly, the `nRegions` neuropil regions are each described by the
polygons
        ``experiment.roi_polys[i_roi, i_trial][i_neuropil + 1][i_contour]``,
respectively.
means : list of `n_trials` :class:`numpy.ndarray`, each shaped ``(height, width)``
The temporal-mean image for each trial (i.e. for each TIFF file,
the average image over all of its frames).
raw : :class:`numpy.ndarray`
A :class:`numpy.ndarray` of shape ``(n_rois, n_trials)``, each element
of which is itself a :class:`numpy.ndarray` shaped
``(n_signals, n_timepoints)``.
For each ROI and trial (``raw[i_roi, i_trial]``) we extract a temporal
trace of the average value within the spatial area of each of the
``nRegions + 1`` regions.
The 0-th region is the ``i_roi``-th ROI (``raw[i_roi, i_trial][0]``).
The subsequent ``nRegions`` vectors are the traces for each of the
neuropil regions.
The units are the same as the supplied imagery (candelas per unit
area).
sep : :class:`numpy.ndarray`
A :class:`numpy.ndarray` of shape ``(n_rois, n_trials)``, each element
of which is itself a :class:`numpy.ndarray` shaped
``(n_signals, n_timepoints)``.
The separated signals, before output signals are ranked according to
their matching against the raw signal from within the ROI.
Separated signal ``i`` for a specific ROI and trial can be found at
``experiment.sep[roi, trial][i, :]``.
This field is only populated after :meth:`separate` has been run; until
then, it is set to ``None``.
mixmat : :class:`numpy.ndarray`
A :class:`numpy.ndarray` of shape ``(n_rois, n_trials)``, each element
of which is itself a :class:`numpy.ndarray` shaped
``(n_rois, n_signals)``.
The mixing matrix, which maps from ``experiment.raw`` to
``experiment.sep``.
        Because the traces from all trials are collated before the signals
        are separated, the mixing matrices for a given ROI are the same
        across all trials.
This means all ``n_trials`` elements in ``mixmat[i_roi, :]`` are
identical.
This field is only populated after :meth:`separate` has been run; until
then, it is set to ``None``.
info : :class:`numpy.ndarray` shaped ``(n_rois, n_trials)`` of dicts
Information about the separation routine.
Each dictionary in the array has the following fields:
converged : bool
Whether the separation model converged, or if it ended due to
reaching the maximum number of iterations.
iterations : int
The number of iterations which were needed for the separation model
to converge.
max_iterations : int
Maximum number of iterations to use when fitting the
separation model.
random_state : int or None
Random seed used to initialise the separation model.
This field is only populated after :meth:`separate` has been run; until
then, it is set to ``None``.
deltaf_raw : :class:`numpy.ndarray`
A :class:`numpy.ndarray` of shape ``(n_rois, n_trials)``, each element
of which is itself a :class:`numpy.ndarray` shaped ``(1, n_timepoint)``.
        The amount of change in fluorescence relative to the baseline fluorescence
(Δf/f\ :sub:`0`).
This field is only populated after :meth:`calc_deltaf` has been run;
until then, it is set to ``None``.
.. versionchanged:: 1.0.0
The shape of the interior arrays changed from ``(n_timepoint, )``
to ``(1, n_timepoint)``.
deltaf_result : :class:`numpy.ndarray`
A :class:`numpy.ndarray` of shape ``(n_rois, n_trials)``, each element
of which is itself a :class:`numpy.ndarray` shaped
``(n_signals, n_timepoints)``.
        The amount of change in fluorescence relative to the baseline fluorescence
(Δf/f\ :sub:`0`).
By default, the baseline is taken from :attr:`raw` because the
minimum values in :attr:`result` are typically zero.
See :meth:`calc_deltaf` for details.
This field is only populated after :meth:`calc_deltaf` has been run;
until then, it is set to ``None``.
"""
_defaults = {
"nRegions": 4,
"expansion": 1,
"method": "nmf",
"alpha": 0.1,
"max_iter": 20000,
"tol": 1e-4,
"max_tries": 1,
}
_preparation_params = ["nRegions", "expansion"]
_separation_params = ["method", "alpha", "max_iter", "tol", "max_tries"]
_preparation_outputs = ["means", "raw", "roi_polys"]
_separation_outputs = ["info", "mixmat", "result", "sep"]
_deltaf_outputs = ["deltaf_raw", "deltaf_result"]
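    # Illustrative usage (a sketch; the paths are hypothetical):
    #
    #   exp = Experiment("imagery_dir/", "rois.zip", folder="fissa_cache")
    #   exp.separate()
    #   decontaminated = exp.result[0, 0][0, :]   # ROI 0, trial 0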
def __init__(
self,
images,
rois,
folder=None,
nRegions=None,
expansion=None,
method=None,
alpha=None,
max_iter=None,
tol=None,
max_tries=None,
ncores_preparation=-1,
ncores_separation=-1,
lowmemory_mode=False,
datahandler=None,
verbosity=1,
):
# Initialise internal variables
self.clear(verbosity=0)
if isinstance(images, basestring):
self.images = sorted(glob.glob(os.path.join(images, "*.tif*")))
elif isinstance(images, cabc.Sequence):
self.images = images
else:
raise ValueError("images should either be string or list")
if isinstance(rois, basestring):
if rois[-3:] == "zip":
self.rois = [rois] * len(self.images)
else:
self.rois = sorted(glob.glob(os.path.join(rois, "*.zip")))
elif isinstance(rois, cabc.Sequence):
self.rois = rois
if len(rois) == 1: # if only one roiset is specified
self.rois *= len(self.images)
else:
raise ValueError("rois should either be string or list")
if datahandler is not None and lowmemory_mode:
raise ValueError(
"Only one of lowmemory_mode and datahandler should be set."
)
elif lowmemory_mode:
self.datahandler = extraction.DataHandlerTifffileLazy()
else:
self.datahandler = datahandler
# define class variables
self.folder = folder
self.nRegions = nRegions
self.expansion = expansion
self.method = method
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.max_tries = max_tries
self.ncores_preparation = ncores_preparation
self.ncores_separation = ncores_separation
self.verbosity = verbosity
# check if any data already exists
if folder is None:
pass
elif folder and not os.path.exists(folder):
os.makedirs(folder)
else:
self.load()
@property
def nCell(self):
if getattr(self, "result", None) is not None:
return self.result.shape[0]
if getattr(self, "raw", None) is not None:
return self.raw.shape[0]
return None
@property
def nTrials(self):
return len(self.images)
def __setattr__(self, name, value):
def check_same_value():
if not hasattr(self, name):
return False
current = getattr(self, name)
if type(current) is not type(value):
return False
if isinstance(current, np.ndarray):
return np.array_equal(current, value)
return current == value
if getattr(self, name, None) is None:
# No need to clear if the current value is None
pass
elif name in ["images", "rois"]:
if not check_same_value():
self.clear()
elif hasattr(self, "_preparation_params") and name in self._preparation_params:
if not check_same_value():
self.clear()
elif hasattr(self, "_separation_params") and name in self._separation_params:
if not check_same_value():
self.clear_separated()
self.__dict__[name] = value
def __str__(self):
if isinstance(self.images, basestring):
str_images = repr(self.images)
elif isinstance(self.images, cabc.Sequence):
str_images = "<{} of length {}>".format(
self.images.__class__.__name__, len(self.images)
)
else:
str_images = repr(self.images)
if isinstance(self.rois, basestring):
str_rois = repr(self.rois)
elif isinstance(self.rois, cabc.Sequence):
str_rois = "<{} of length {}>".format(
self.rois.__class__.__name__, len(self.rois)
)
else:
            str_rois = repr(self.rois)
fields = (
["folder"]
+ self._preparation_params
+ self._separation_params
+ ["ncores_preparation", "ncores_separation", "datahandler", "verbosity"]
)
str_parts = [
"{}={}".format(field, repr(getattr(self, field))) for field in fields
]
return "{}.{}(images={}, rois={}, {})".format(
__name__,
self.__class__.__name__,
str_images,
str_rois,
", ".join(str_parts),
)
def __repr__(self):
fields = (
["images", "rois", "folder"]
+ self._preparation_params
+ self._separation_params
+ ["ncores_preparation", "ncores_separation", "datahandler", "verbosity"]
)
repr_parts = [
"{}={}".format(field, repr(getattr(self, field))) for field in fields
]
return "{}.{}({})".format(
__name__, self.__class__.__name__, ", ".join(repr_parts)
)
def clear(self, verbosity=None):
r"""
Clear prepared data, and all data downstream of prepared data.
.. versionadded:: 1.0.0
Parameters
----------
verbosity : int, optional
Whether to show the data fields which were cleared.
By default, the object's :attr:`verbosity` attribute is used.
"""
if verbosity is None:
verbosity = getattr(self, "verbosity", 1) - 1
keys = self._preparation_outputs + ["deltaf_raw"]
# Wipe outputs
keys_cleared = []
for key in keys:
if getattr(self, key, None) is not None:
keys_cleared.append(key)
setattr(self, key, None)
if verbosity >= 1 and keys_cleared:
print("Cleared {}".format(", ".join(repr(k) for k in keys_cleared)))
# Wipe outputs of separate(), as they no longer match self.raw
self.clear_separated(verbosity=verbosity)
def clear_separated(self, verbosity=None):
r"""
Clear separated data, and all data downstream of separated data.
.. versionadded:: 1.0.0
Parameters
----------
verbosity : int, optional
Whether to show the data fields which were cleared.
By default, the object's :attr:`verbosity` attribute is used.
"""
if verbosity is None:
verbosity = getattr(self, "verbosity", 1) - 1
keys = self._separation_outputs + ["deltaf_result"]
# Wipe outputs
keys_cleared = []
for key in keys:
if getattr(self, key, None) is not None:
keys_cleared.append(key)
setattr(self, key, None)
if verbosity >= 1 and keys_cleared:
print("Cleared {}".format(", ".join(repr(k) for k in keys_cleared)))
def _adopt_default_parameters(self, only_preparation=False, force=False):
r"""
Adopt default values for unset analysis parameters.
.. versionadded:: 1.0.0
Parameters
----------
only_preparation : bool, optional
Whether to restrict the parameters to only those used for data
extraction during the preparation step. Default is ``False``.
force : bool, optional
If `True`, all parameters will be overridden with default values
even if they had already been set. Default is ``False``.
"""
defaults = self._defaults
if only_preparation:
# Prune down to only the preparation parameters
defaults = {
k: v for k, v in defaults.items() if k in self._preparation_params
}
# Check through each parameter and set unset values from defaults
keys_adopted = []
for key, value in defaults.items():
if getattr(self, key, None) is not None and not force:
continue
setattr(self, key, value)
keys_adopted.append(key)
if self.verbosity >= 5 and keys_adopted:
print(
"Adopted default values for {}".format(
", ".join(repr(k) for k in keys_adopted)
)
)
def load(self, path=None, force=False, skip_clear=False):
r"""
Load data from cache file in npz format.
.. versionadded:: 1.0.0
Parameters
----------
path : str, optional
Path to cache file (.npz format) or a directory containing
``"prepared.npz"`` and/or ``"separated.npz"`` files.
Default behaviour is to use the :attr:`folder` parameter which was
            provided when the object was initialised
(``experiment.folder``).
force : bool, optional
Whether to load the cache even if its experiment parameters differ
from the properties of this experiment. Default is ``False``.
skip_clear : bool, optional
Whether to skip clearing values before loading. Default is ``False``.
"""
dynamic_properties = ["nCell", "nTrials"]
ValGroup = collections.namedtuple(
"ValGroup",
["category", "validators", "fields", "clearif", "clearfn"],
)
validation_groups = [
ValGroup(
"prepared",
self._preparation_params,
self._preparation_outputs + ["deltaf_raw"],
["raw"],
self.clear,
),
ValGroup(
"separated",
self._preparation_params + self._separation_params,
self._separation_outputs + ["deltaf_result"],
["result"],
self.clear_separated,
),
]
if path is None:
if self.folder is None:
raise ValueError(
"path must be provided if experiment folder is not defined"
)
path = self.folder
if os.path.isdir(path) or path == "":
for fname in ("prepared.npz", "separated.npz"):
fullfname = os.path.join(path, fname)
if not os.path.exists(fullfname):
continue
self.load(fullfname)
return
if self.verbosity >= 1:
print("Loading data from cache {}".format(path))
cache = np.load(path, allow_pickle=True)
def _unpack_scalar(x):
if np.array_equal(x, None):
return None
if x.ndim == 0:
# Handle loading scalars
return x.item()
return x
if force:
for field in cache.files:
if field in dynamic_properties:
continue
setattr(self, field, _unpack_scalar(cache[field]))
return
set_fields = set()
for category, validators, fields, clearif, clearfn in validation_groups:
valid = True
validation_errors = []
for validator in validators:
if getattr(self, validator, None) is None:
# If the validator is not yet set locally, it is fine to
# overwrite it.
continue
if validator not in cache:
# If the validator is not set in the cache and is set
# locally, we can't verify that the cached data is
# compatible. We don't raise an error for this because the
# contents are probably not this category.
valid = False
break
value = _unpack_scalar(cache[validator])
if value is None:
valid = False
break
if not np.array_equal(getattr(self, validator), value):
# If the validator is set and doesn't match the value in
# the cache, we will raise an error.
validation_errors.append(
" {}: Experiment (ours) {}, Cache (theirs) {}".format(
validator,
getattr(self, validator),
value,
)
)
if len(validation_errors) > 0:
raise ValueError(
"Experiment parameter value(s) in {} do not match the"
" current experiment values:\n{}".format(
path, "\n".join(validation_errors)
)
)
if not valid:
continue
# Check the image and roi size is appropriate
for k in ["raw", "result"]:
if k not in cache.files or np.array_equal(cache[k], None):
continue
if cache[k].shape[1] != self.nTrials:
raise ValueError(
"Data mismatch between {} and our images."
" Cached {} has {} trials, but our Experiment has {}"
" trials.".format(path, k, cache[k].shape[1], self.nTrials)
)
                if self.nCell is not None and cache[k].shape[0] != self.nCell:
                    raise ValueError(
                        "Data mismatch between {} and our roisets."
                        " Cached {} has {} ROIs, but our Experiment has {}"
                        " ROIs.".format(path, k, cache[k].shape[0], self.nCell)
)
# Wipe the values currently held before setting new values
if not skip_clear:
for field in clearif:
if field in cache.files:
clearfn()
break
# All the validators were valid, so we are okay to load the fields
#
# Check to see if there are any fields to load. If not, we won't
# load the validators.
any_field_to_load = False
for field in cache.files:
if field in dynamic_properties:
continue
if field in fields:
any_field_to_load = True
# If we don't have any data to load, no need to set the validators
# or print that we loaded something.
if not any_field_to_load:
continue
# Load all the validators, overwriting our local values if None.
# We do this before loading in the data fields because of automatic
# clear when parameter attributes change.
for validator in validators:
if validator not in cache.files:
continue
value = _unpack_scalar(cache[validator])
if getattr(self, validator, None) is None:
if self.verbosity >= 2:
print(
" Adopting value {}={} from {}".format(
validator, repr(value), path
)
)
setattr(self, validator, value)
set_fields.add(validator)
# Load all the fields
for field in fields:
if field not in cache or field in dynamic_properties:
continue
setattr(self, field, _unpack_scalar(cache[field]))
set_fields.add(field)
if self.verbosity >= 2:
print("Loaded {} data from {}".format(category, path))
# Check there weren't any left over fields in the cache which
# were left unloaded
unset_fields = []
for field in cache.files:
if field in dynamic_properties:
continue
if field not in set_fields:
unset_fields.append(field)
if len(unset_fields) > 0 and self.verbosity >= 1:
print(
"Warning: field(s) {} in {} were not loaded.".format(unset_fields, path)
)
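    # Illustrative usage (a sketch; the cache directory is hypothetical):
    #
    #   exp.load("fissa_cache")   # loads prepared.npz and/or separated.npz
    #   exp.load("fissa_cache/separated.npz", force=True)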
def separation_prep(self, redo=False):
r"""
Prepare and extract the data to be separated.
For each trial, performs the following steps:
- Load in data as arrays.
- Load in ROIs as masks.
        - Grow and separate ROIs to define neuropil regions.
- Using neuropil and original ROI regions, extract traces from data.
After running this you can access the raw data (i.e. pre-separation)
as ``experiment.raw`` and ``experiment.rois``.
``experiment.raw`` is a list of arrays.
``experiment.raw[roi, trial]`` gives you the traces of a specific ROI
and trial, across the ROI and neuropil regions.
``experiment.roi_polys`` is a list of lists of arrays.
``experiment.roi_polys[roi, trial][region][0]`` gives you the
polygon for the region for a specific ROI, trial and region.
``region=0`` is the ROI itself (i.e. the outline of the neuron cell),
and ``region>0`` gives the different neuropil regions.
        For separable masks, it is possible that multiple outlines are
found, which can be accessed as
``experiment.roi_polys[roi, trial][region][i]``,
where ``i`` is the outline index.
Parameters
----------
redo : bool, optional
If ``False``, we load previously prepared data when possible.
If ``True``, we re-run the preparation, even if it has previously
been run. Default is ``False``.
"""
# Get the timestamp for program start
t0 = time.time()
# define filename where data will be present
if self.folder is None:
fname = None
redo = True
else:
fname = os.path.join(self.folder, "prepared.npz")
# try to load data from filename
if fname is None or not os.path.isfile(fname):
redo = True
if not redo:
try:
self.clear()
self.load(fname)
if self.raw is not None:
return
except BaseException as err:
print("An error occurred while loading {}".format(fname))
print(err)
print("Extraction will be redone and {} overwritten".format(fname))
# Wipe outputs
self.clear()
# Adopt default values
self._adopt_default_parameters(only_preparation=True)
# Extract signals
n_trial = len(self.images)
if self.verbosity >= 2:
msg = "Doing region growing and data extraction for {} trials...".format(
n_trial
)
msg += "\n Images:"
for image in self.images:
if self.verbosity >= 4 or isinstance(image, basestring):
msg += "\n {}".format(image)
else:
msg += "\n {}".format(image.__class__)
msg += "\n ROI sets:"
for roiset in self.rois:
if self.verbosity >= 4 or isinstance(roiset, basestring):
msg += "\n {}".format(roiset)
else:
msg += "\n {}".format(roiset.__class__)
for key in self._preparation_params:
msg += "\n {}: {}".format(key, repr(getattr(self, key)))
print(msg)
sys.stdout.flush()
# Make a handle to the extraction function with parameters configured
_extract_cfg = functools.partial(
extract,
nRegions=self.nRegions,
expansion=self.expansion,
datahandler=self.datahandler,
verbosity=self.verbosity - 2,
total=n_trial,
)
# check whether we should show progress bars
disable_progressbars = self.verbosity <= 0 or 3 <= self.verbosity
# Check how many workers to spawn.
# Map the behaviour of ncores=None to one job per CPU core, like for
# multiprocessing.Pool(processes=None). With joblib, this is
# joblib.Parallel(n_jobs=-1) instead.
n_jobs = -1 if self.ncores_preparation is None else self.ncores_preparation
if 0 <= n_jobs <= 1:
# Don't use multiprocessing
outputs = [
_extract_cfg(image, rois, label=i)
for i, (image, rois) in tqdm(
enumerate(zip(self.images, self.rois)),
total=n_trial,
desc="Extracting traces",
disable=disable_progressbars,
)
]
else:
# Use multiprocessing
outputs = Parallel(
n_jobs=n_jobs, backend="threading", verbose=max(0, self.verbosity - 5)
)(
delayed(_extract_cfg)(image, rois, label=i)
for i, (image, rois) in tqdm(
enumerate(zip(self.images, self.rois)),
total=n_trial,
desc="Extracting traces",
disable=disable_progressbars,
)
)
# get number of cells
n_roi = len(outputs[0][1])
# predefine data structures
raw = np.empty((n_roi, n_trial), dtype=object)
roi_polys = np.empty_like(raw)
# Set outputs
means = []
for i_trial, (raw_i, polys_i, mean_i) in enumerate(outputs):
means.append(mean_i)
for i_roi in range(n_roi):
raw[i_roi][i_trial] = raw_i[i_roi]
roi_polys[i_roi][i_trial] = polys_i[i_roi]
self.raw = raw
self.roi_polys = roi_polys
self.means = means
if self.verbosity >= 1:
print(
"Finished extracting raw signals from {} ROIs across {} trials in {}.".format(
n_roi,
n_trial,
_pretty_timedelta(seconds=time.time() - t0),
)
)
sys.stdout.flush()
# Maybe save to cache file
if self.folder is not None:
self.save_prep()
def save_prep(self, destination=None):
r"""
Save prepared raw signals, extracted from images, to an npz file.
.. versionadded:: 1.0.0
Parameters
----------
destination : str, optional
Path to output file. The default destination is
``"prepared.npz"`` within the cache directory
``experiment.folder``.
"""
fields = set(self._preparation_params + self._preparation_outputs)
if destination is None:
if self.folder is None:
                raise ValueError(
                    "The folder attribute must be declared in order to save"
                    " preparation outputs to the cache."
)
destination = os.path.join(self.folder, "prepared.npz")
if self.verbosity >= 1:
print("Saving extracted traces to {}".format(destination))
sys.stdout.flush()
destdir = os.path.dirname(destination)
if destdir and not os.path.isdir(destdir):
os.makedirs(destdir)
np.savez_compressed(
destination,
**{
field: getattr(self, field)
for field in fields
if getattr(self, field) is not None
}
)
def separate(self, redo_prep=False, redo_sep=False):
r"""
Separate all the trials with FISSA algorithm.
After running ``separate``, data can be found as follows:
experiment.sep
Raw separation output, without being matched. Signal ``i`` for
a specific ROI and trial can be found as
``experiment.sep[roi, trial][i, :]``.
experiment.result
Final output, in order of presence in the ROI.
Signal ``i`` for a specific ROI and trial can be found at
``experiment.result[roi, trial][i, :]``.
Note that the ordering is such that ``i = 0`` is the signal
most strongly present in the ROI, and subsequent entries
are in diminishing order.
experiment.mixmat
The mixing matrix, which maps from ``experiment.raw`` to
``experiment.sep``.
experiment.info
Information about separation routine, iterations needed, etc.
Parameters
----------
redo_prep : bool, optional
            Whether to redo the preparation. Default is ``False``. Note that
if this is true, we set ``redo_sep = True`` as well.
redo_sep : bool, optional
Whether to redo the separation. Default is ``False``. Note that
this parameter is ignored if `redo_prep` is set to ``True``.
"""
# Get the timestamp for program start
t0 = time.time()
# Do data preparation
if redo_prep or self.raw is None:
self.separation_prep(redo_prep)
if redo_prep:
redo_sep = True
# Define filename to store data in
if self.folder is None:
fname = None
redo_sep = True
else:
fname = os.path.join(self.folder, "separated.npz")
if fname is None or not os.path.isfile(fname):
redo_sep = True
if not redo_sep:
try:
self.clear_separated()
self.load(fname)
if self.result is not None:
return
except BaseException as err:
print("An error occurred while loading {}".format(fname))
print(err)
print(
"Signal separation will be redone and {} overwritten"
"".format(fname)
)
# Wipe outputs
self.clear_separated()
# Adopt default values
self._adopt_default_parameters()
# Check size of the input arrays
n_roi = len(self.raw)
n_trial = len(self.raw[0])
# Print what data will be analysed
if self.verbosity >= 2:
msg = "Doing signal separation for {} ROIs over {} trials...".format(
n_roi, n_trial
)
msg += "\n method: {}".format(repr(self.method))
if "ica" not in self.method.lower():
msg += "\n alpha: {}".format(repr(self.alpha))
for key in ["max_iter", "max_tries", "tol"]:
msg += "\n {}: {}".format(key, repr(getattr(self, key)))
print(msg)
sys.stdout.flush()
# Make a handle to the separation function with parameters configured
_separate_cfg = functools.partial(
separate_trials,
alpha=self.alpha,
max_iter=self.max_iter,
tol=self.tol,
max_tries=self.max_tries,
method=self.method,
verbosity=self.verbosity - 2,
total=n_roi,
)
# check whether we should show progress bars
disable_progressbars = self.verbosity <= 0 or 3 <= self.verbosity
# Check how many workers to spawn.
# Map the behaviour of ncores=None to one job per CPU core, like for
# multiprocessing.Pool(processes=None). With joblib, this is
# joblib.Parallel(n_jobs=-1) instead.
n_jobs = -1 if self.ncores_separation is None else self.ncores_separation
# Do the extraction
if 0 <= n_jobs <= 1:
# Don't use multiprocessing
outputs = [
_separate_cfg(X, label=i)
for i, X in tqdm(
enumerate(self.raw),
total=len(self.raw),
desc="Separating data",
disable=disable_progressbars,
)
]
else:
# Use multiprocessing
outputs = Parallel(
n_jobs=n_jobs, backend="threading", verbose=max(0, self.verbosity - 5)
)(
delayed(_separate_cfg)(X, label=i)
for i, X in tqdm(
enumerate(self.raw),
total=len(self.raw),
desc="Separating data",
disable=disable_progressbars,
)
)
# Define output shape as an array of objects shaped (n_roi, n_trial)
sep = np.empty((n_roi, n_trial), dtype=object)
result = np.empty_like(sep)
mixmat = np.empty_like(sep)
info = np.empty_like(sep)
# Place our outputs into the initialised arrays
for i_roi, (sep_i, match_i, mixmat_i, conv_i) in enumerate(outputs):
sep[i_roi, :] = sep_i
result[i_roi, :] = match_i
mixmat[i_roi, :] = [mixmat_i] * n_trial
info[i_roi, :] = conv_i
# list non-converged cells
non_converged_rois = [
i_roi for i_roi, info_i in enumerate(info) if not info_i[0]["converged"]
]
if self.verbosity >= 1:
message = "Finished separating signals from {} ROIs across {} trials in {}".format(
n_roi,
n_trial,
_pretty_timedelta(seconds=time.time() - t0),
)
if len(non_converged_rois) > 0:
message += (
"\n"
"Separation did not converge for the following {} ROIs: {}."
"\nConsider increasing max_iter (currently set to {})"
" or other FISSA parameters if this happens often and/or"
" to a lot of cells.".format(
len(non_converged_rois), non_converged_rois, self.max_iter
)
)
print(message)
sys.stdout.flush()
# Set outputs
self.info = info
self.mixmat = mixmat
self.sep = sep
self.result = result
# Maybe save to cache file
if self.folder is not None:
self.save_separated()
def save_separated(self, destination=None):
r"""
Save separated signals to an npz file.
.. versionadded:: 1.0.0
Parameters
----------
destination : str, optional
Path to output file. The default destination is ``"separated.npz"``
within the cache directory ``experiment.folder``.
"""
fields = set(
self._preparation_params
+ self._separation_params
+ self._separation_outputs
+ self._deltaf_outputs
)
if destination is None:
if self.folder is None:
raise ValueError(
"The folder attribute must be declared in order to save"
" separation outputs to the cache."
)
destination = os.path.join(self.folder, "separated.npz")
if self.verbosity >= 1:
print("Saving results to {}".format(destination))
sys.stdout.flush()
destdir = os.path.dirname(destination)
if destdir and not os.path.isdir(destdir):
os.makedirs(destdir)
np.savez_compressed(
destination,
**{
field: getattr(self, field)
for field in fields
if getattr(self, field) is not None
}
)
def calc_deltaf(self, freq, use_raw_f0=True, across_trials=True):
r"""
Calculate Δf/f0 for raw and result traces.
The outputs are found in the :attr:`deltaf_raw` and
:attr:`deltaf_result` attributes, which can be accessed at
``experiment.deltaf_raw`` and ``experiment.deltaf_result``.
Parameters
----------
freq : float
Imaging frequency, in Hz.
use_raw_f0 : bool, optional
If ``True`` (default), use an f0 estimate from the raw ROI trace
for both raw and result traces. If ``False``, use individual f0
estimates for each of the traces.
across_trials : bool, optional
If ``True``, we estimate a single baseline f0 value across all
trials. If ``False``, each trial will have their own baseline f0,
and Δf/f\ :sub:`0` value will be relative to the trial-specific f0.
Default is ``True``.
"""
# Get the timestamp for program start
t0 = time.time()
if self.verbosity >= 2:
msg = "Calculating Δf/f0 for raw and result signals"
if self.verbosity < 3:
pass
elif across_trials:
msg += " (same f0 across all trials"
else:
msg += " (different f0 baseline for each trial"
if self.verbosity < 3:
pass
elif use_raw_f0:
msg += ", using f0 in raw data for result)"
else:
msg += ")"
msg += (
"\nCaution: Measuring baseline f0 from result may result"
" in division by zero."
)
print(msg)
sys.stdout.flush()
# Initialise output arrays
deltaf_raw = np.empty_like(self.raw)
deltaf_result = np.empty_like(self.result)
# Can't include Δ in the tqdm description on Python2
desc = "Calculating {}f/f0".format("d" if sys.version_info < (3, 0) else "Δ")
# Check size of the input arrays
n_roi = len(self.result)
n_trial = len(self.result[0])
# Loop over cells
for i_roi in tqdm(
range(n_roi),
total=n_roi,
desc=desc,
disable=self.verbosity < 1,
):
# if deltaf should be calculated across all trials
if across_trials:
# get concatenated traces
raw_conc = np.concatenate(self.raw[i_roi], axis=1)[0, :]
result_conc = np.concatenate(self.result[i_roi], axis=1)
# calculate Δf/f0
raw_f0 = deltaf.findBaselineF0(raw_conc, freq)
raw_conc = (raw_conc - raw_f0) / raw_f0
result_f0 = deltaf.findBaselineF0(result_conc, freq, 1).T[:, None]
if use_raw_f0:
result_conc = (result_conc - result_f0) / raw_f0
else:
result_conc = (result_conc - result_f0) / result_f0
# store Δf/f0
curTrial = 0
for i_trial in range(n_trial):
nextTrial = curTrial + self.raw[i_roi][i_trial].shape[1]
signal = raw_conc[curTrial:nextTrial]
deltaf_raw[i_roi][i_trial] = np.expand_dims(signal, axis=0)
signal = result_conc[:, curTrial:nextTrial]
deltaf_result[i_roi][i_trial] = signal
curTrial = nextTrial
else:
# loop across trials
for i_trial in range(n_trial):
# get current signals
raw_sig = self.raw[i_roi][i_trial][0, :]
result_sig = self.result[i_roi][i_trial]
                    # calculate Δf/f0
raw_f0 = deltaf.findBaselineF0(raw_sig, freq)
result_f0 = deltaf.findBaselineF0(result_sig, freq, 1).T[:, None]
result_f0[result_f0 < 0] = 0
raw_sig = (raw_sig - raw_f0) / raw_f0
if use_raw_f0:
result_sig = (result_sig - result_f0) / raw_f0
else:
result_sig = (result_sig - result_f0) / result_f0
# store Δf/f0
deltaf_raw[i_roi][i_trial] = np.expand_dims(raw_sig, axis=0)
deltaf_result[i_roi][i_trial] = result_sig
self.deltaf_raw = deltaf_raw
self.deltaf_result = deltaf_result
if self.verbosity >= 1:
print(
"Finished calculating Δf/f0 for raw and result signals in {}".format(
_pretty_timedelta(seconds=time.time() - t0)
)
)
sys.stdout.flush()
# Maybe save to cache file
if self.folder is not None:
self.save_separated()
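    # Illustrative usage (a sketch; the imaging rate is hypothetical):
    #
    #   exp.calc_deltaf(freq=30.0)        # 30 Hz acquisition
    #   dff = exp.deltaf_result[0, 0]     # shaped (n_signals, n_timepoints)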
def to_matfile(self, fname=None, legacy=False):
r"""Save the results to a MATLAB file.
.. versionadded:: 1.0.0
This will generate a MAT-file (.mat) which can be loaded into MATLAB.
The MAT-file contains structs for all the experiment output attributes
(:attr:`roi_polys`, :attr:`result`, :attr:`raw`, etc.)
and analysis parameters (:attr:`expansion`, :attr:`nRegions`,
:attr:`alpha`, etc.).
If Δf/f\ :sub:`0` was calculated with :meth:`calc_deltaf`,
:attr:`deltaf_result` and :attr:`deltaf_raw` are also included.
These can be interfaced with as illustrated below.
``result{1, 1}(1, :)``
The separated signal for the first ROI and first trial.
This is equivalent to ``experiment.result[0, 0][0, :]`` when
interacting with the :class:`Experiment` object in Python.
``result{roi, trial}(1, :)``
The separated signal for the ``roi``-th ROI and ``trial``-th trial.
This is equivalent to
``experiment.result[roi - 1, trial - 1][0, :]`` when
interacting with the :class:`Experiment` object in Python.
``result{roi, trial}(2, :)``
A contaminating signal.
``raw{roi, trial}(1, :)``
Raw measured neuronal signal, averaged over the ROI.
This is equivalent to ``experiment.raw[roi - 1, trial - 1][0, :]``
when interacting with the :class:`Experiment` object in Python.
``raw{roi, trial}(2, :)``
Raw signal from first neuropil region (of ``nRegions``).
``roi_polys{roi, trial}{1}``
Polygon outlining the ROI, as an n-by-2 array of coordinates.
``roi_polys{roi, trial}{2}``
Polygon outlining the first neuropil region (of ``nRegions``),
as an n-by-2 array of coordinates.
Examples
--------
Here are some example MATLAB plots.
Plotting raw and decontaminated traces:
.. code:: octave
% Load the FISSA output data
S = load('separated.mat')
% Separated signal for the third ROI, second trial
roi = 3; trial = 2;
% Plot the raw and result traces for the ROI signal
figure; hold on;
plot(S.raw{roi, trial}(1, :));
plot(S.result{roi, trial}(1, :));
title(sprintf('ROI %d, Trial %d', roi, trial));
xlabel('Time (frame number)');
ylabel('Signal intensity (candela per unit area)');
legend({'Raw', 'Result'});
If all ROIs are contiguous and described by a single contour,
        the mean image and ROI locations for one trial can be plotted as
follows:
.. code:: octave
% Load the FISSA output data
S = load('separated.mat')
trial = 1;
figure; hold on;
% Plot the mean image
imagesc(squeeze(S.means(trial, :, :)));
colormap('gray');
% Plot ROI locations
for i_roi = 1:size(S.result, 1);
contour = S.roi_polys{i_roi, trial}{1};
plot(contour(:, 2), contour(:, 1));
end
set(gca, 'YDir', 'reverse');
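        From Python, the file read by the MATLAB snippets above can be
        produced with a call along these lines (a minimal sketch;
        ``experiment`` is assumed to be an already-separated
        :class:`Experiment` instance):
        .. code:: python
            experiment.calc_deltaf(freq=10)  # optional; adds the deltaf fields
            experiment.to_matfile("separated.mat")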
Parameters
----------
fname : str, optional
Destination for output file. The default is a file named
``"separated.mat"`` within the cache save directory for the
experiment (the :attr:`folder` argument when the
:class:`Experiment` instance was created).
legacy : bool, default=False
Whether to use the legacy format of :meth:`save_to_matlab`.
This also changes the default output name to ``"matlab.mat"``.
"""
default_name = "separated.mat"
if legacy:
default_name = "matlab.mat"
# define filename
if fname is None:
if self.folder is None:
raise ValueError(
"fname must be provided if experiment folder is undefined"
)
fname = os.path.join(self.folder, default_name)
# initialize dictionary to save
M = collections.OrderedDict()
def reformat_dict_for_legacy(orig_dict):
new_dict = collections.OrderedDict()
# loop over cells and trial
for i_roi in range(len(self.result)):
# get current cell label
c_lab = "cell" + str(i_roi)
# update dictionary
new_dict[c_lab] = collections.OrderedDict()
for i_trial in range(len(self.result[0])):
# get current trial label
t_lab = "trial" + str(i_trial)
# update dictionary
new_dict[c_lab][t_lab] = orig_dict[i_roi][i_trial]
return new_dict
if legacy:
M["ROIs"] = reformat_dict_for_legacy(self.roi_polys)
M["raw"] = reformat_dict_for_legacy(self.raw)
M["result"] = reformat_dict_for_legacy(self.result)
if getattr(self, "deltaf_raw", None) is not None:
M["df_raw"] = reformat_dict_for_legacy(self.deltaf_raw)
if getattr(self, "deltaf_result", None) is not None:
M["df_result"] = reformat_dict_for_legacy(self.deltaf_result)
else:
fields = (
self._preparation_params
+ self._separation_params
+ self._preparation_outputs
+ self._separation_outputs
+ self._deltaf_outputs
)
for field in fields:
x = getattr(self, field)
if x is None:
continue
M[field] = x
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="Creating an ndarray from ragged nested sequences",
)
savemat(fname, M)
def save_to_matlab(self, fname=None):
r"""
Save the results to a MATLAB file.
.. deprecated:: 1.0.0
Use ``experiment.to_matfile(legacy=True)`` instead.
This will generate a .mat file which can be loaded into MATLAB to
provide structs: ROIs, result, raw.
If Δf/f\ :sub:`0` was calculated, these will also be stored as ``df_result``
and ``df_raw``, which will have the same format as ``result`` and
``raw``.
These can be interfaced with as follows, for ROI 0, trial 0:
``ROIs.cell0.trial0{1}``
Polygon outlining the ROI.
``ROIs.cell0.trial0{2}``
Polygon outlining the first (of ``nRegions``) neuropil region.
``result.cell0.trial0(1, :)``
Final extracted neuronal signal.
``result.cell0.trial0(2, :)``
Contaminating signal.
``raw.cell0.trial0(1, :)``
Raw measured cell signal, average over the ROI.
``raw.cell0.trial0(2, :)``
Raw signal from first (of ``nRegions``) neuropil region.
Parameters
----------
fname : str, optional
Destination for output file. Default is a file named
``"matlab.mat"`` within the cache save directory for the experiment
(the `folder` argument when the ``Experiment`` instance was created).
See Also
--------
Experiment.to_matfile
"""
warnings.warn(
"The experiment.save_to_matlab() method is deprecated."
" Please use experiment.to_matfile(legacy=True) instead.",
DeprecationWarning,
)
return self.to_matfile(fname=fname, legacy=True)
def run_fissa(
images,
rois,
folder=None,
freq=None,
return_deltaf=False,
deltaf_across_trials=True,
export_to_matlab=False,
**kwargs
):
r"""
Functional interface to run FISSA.
.. versionadded:: 1.0.0
Uses the methodology described in
`FISSA: A neuropil decontamination toolbox for calcium imaging signals <doi_>`_.
.. _doi: https://www.doi.org/10.1038/s41598-018-21640-2
Parameters
----------
images : str or list
The raw recording data.
Should be one of:
- the path to a directory containing TIFF files (string),
- a list of paths to TIFF files (list of strings),
- a list of :term:`array_like` data already loaded into memory, each
shaped ``(n_frames, height, width)``.
Note that each TIFF/array is considered a single trial.
rois : str or list
The roi definitions.
Should be one of:
- the path to a directory containing ImageJ ZIP files (string),
- the path of a single ImageJ ZIP file (string),
- a list of ImageJ ZIP files (list of strings),
        - a list of arrays, each encoding ROI polygons,
- a list of lists of binary arrays, each representing a ROI mask.
This can either be a single roiset for all trials, or a different
roiset for each trial.
folder : str, optional
Path to a cache directory from which pre-extracted data will
be loaded if present, and saved to otherwise. If `folder` is
unset, the experiment data will not be saved.
freq : float, optional
Imaging frequency, in Hz. Required if ``return_deltaf=True``.
return_deltaf : bool, optional
Whether to return Δf/f\ :sub:`0`. Otherwise, the decontaminated signal
is returned scaled against the raw recording. Default is ``False``.
deltaf_across_trials : bool, default=True
If ``True``, we estimate a single baseline f0 value across all
trials when computing Δf/f\ :sub:`0`.
        If ``False``, each trial will have its own baseline f0, and the
        Δf/f\ :sub:`0` values will be relative to the trial-specific f0.
Default is ``True``.
export_to_matlab : bool or str or None, default=False
Whether to export the data to a MATLAB-compatible .mat file.
If `export_to_matlab` is a string, it is used as the path to the output
file. If ``export_to_matlab=True``, the matfile is saved to the
default path of ``"separated.mat"`` within the `folder` directory, and
`folder` must be set. If this is ``None``, the matfile is exported to
the default path if `folder` is set, and otherwise is not exported.
Default is ``False``.
**kwargs
Additional keyword arguments as per :class:`Experiment`.
Returns
-------
result : 2d numpy.ndarray of 2d numpy.ndarrays of np.float64
The vector ``result[roi, trial][0, :]`` is the trace from ROI ``roi``
in trial ``trial``.
If ``return_deltaf=True``, this is Δf/f\ :sub:`0`;
otherwise, it is the decontaminated signal scaled as per the raw
signal. f\ :sub:`0` is the baseline as calculated from the raw
signal.
raw : 2d numpy.ndarray of 2d numpy.ndarrays of np.float64
The raw traces without separation.
The vector ``raw[c, t][0, :]`` is the ROI trace from cell ``c`` in
        trial ``t``. The vector ``raw[c, t][i, :]`` for ``i >= 1`` is the trace from
cell ``c`` in trial ``t``, from neuropil region ``i-1``.
If ``return_deltaf=True``, this is Δf/f\ :sub:`0`; otherwise it's
the raw extracted signal.
See Also
--------
fissa.core.Experiment
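    Examples
    --------
    A minimal usage sketch (``"images/"`` and ``"rois.zip"`` are placeholder
    paths for your own data; the cache ``folder`` is optional):
    .. code:: python
        result, raw = run_fissa("images/", "rois.zip", folder="fissa-cache")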
"""
# Parse arguments
if export_to_matlab is None:
export_to_matlab = folder is not None
if return_deltaf and freq is None:
raise ValueError("The argument `freq` must be set to determine df/f0.")
# Make a new Experiment object
experiment = Experiment(images, rois, folder=folder, **kwargs)
# Run separation
experiment.separate()
# Calculate Δf/f0
if return_deltaf or (export_to_matlab and freq is not None):
experiment.calc_deltaf(freq=freq, across_trials=deltaf_across_trials)
# Save to matfile
if export_to_matlab:
matlab_fname = None if isinstance(export_to_matlab, bool) else export_to_matlab
experiment.to_matfile(matlab_fname)
# Return appropriate data
if return_deltaf:
return experiment.deltaf_result, experiment.deltaf_raw
return experiment.result, experiment.raw
|
rochefort-lab/fissa
|
fissa/core.py
|
Python
|
gpl-3.0
| 74,650
|
[
"NEURON"
] |
56231f38a0a3ada54b51d15ddd2c1fa98e0be44813ec1f58b4c29e8c4f972cf9
|
from galaxy.jobs import JobDestination
import os
import sys
import json
import cStringIO
import logging
log = logging.getLogger( __name__ )
def dump(obj, nested_level=0, output=sys.stdout):
spacing = ' '
if type(obj) == dict:
print >> output, '%s{' % ((nested_level) * spacing)
for k, v in obj.items():
if hasattr(v, '__iter__'):
print >> output, '%s%s:' % ((nested_level + 1) * spacing, k)
dump(v, nested_level + 1, output)
else:
print >> output, '%s%s: %s' % ((nested_level + 1) * spacing, k, v)
print >> output, '%s}' % (nested_level * spacing)
elif type(obj) == list:
print >> output, '%s[' % ((nested_level) * spacing)
for v in obj:
if hasattr(v, '__iter__'):
dump(v, nested_level + 1, output)
else:
print >> output, '%s%s' % ((nested_level + 1) * spacing, v)
print >> output, '%s]' % ((nested_level) * spacing)
else:
print >> output, '%s%s' % (nested_level * spacing, obj)
def dynamic_slurm_cluster_gatk(job, tool_id):
# Allocate extra time
inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] )
inp_data.update( [ ( da.name, json.loads(da.value) ) for da in job.parameters ] )
out = cStringIO.StringIO()
dump(inp_data, 1, out)
log.debug(out.getvalue())
nativeSpecs = '--nodes=1 --ntasks=1'
    # the runner doesn't allow specifying --cpus-per-task
# thus the mem calculation gets messy with more than 1 node
# --> translate nt ==> nodes, nct ==> ntasks
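    # illustrative example (hypothetical parameter values): nt=2, nct=4, mem=16000
    # would yield the nativeSpecification '--nodes=2 --ntasks=8 --mem=16000'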
if 'cond_threads' not in inp_data:
return JobDestination(runner="slurm")
if inp_data['cond_threads']['cond_threads_enabled'] == "True":
nNodes = int(inp_data['cond_threads']['nt'])
nCPU = int(inp_data['cond_threads']['nct'])
nMEM = int(inp_data['cond_threads']['mem'])
if nMEM > 0:
nativeSpecs = '--nodes=%d --ntasks=%d --mem=%d' % (nNodes, nCPU*nNodes, nMEM)
else:
nativeSpecs = '--nodes=%d --ntasks=%d' % (nNodes, nCPU*nNodes)
return JobDestination(runner="slurm", params={"nativeSpecification": nativeSpecs})
|
kaktus42/galaxytools
|
tools/gatk/tool-data/destinations.py
|
Python
|
gpl-2.0
| 2,338
|
[
"Galaxy"
] |
aa2ce769c1aebfbcfb036a8185683c522dad3abea69ac3c99ade9a6e065285e9
|
"""
Copyright 2014, Roberto Paleari (@rpaleari)
Abstract class for QTrace output modules.
"""
import abc
import trace.reader
import trace.syscall
class OutputGenerator(object):
__metaclass__ = abc.ABCMeta
def __init__(self, stream):
self.__stream = stream
self.__stream.write(self._prologue())
# Taint label to system call object map
self.__taintmap = {}
def __del__(self):
self.__stream.write(self._epilogue())
@abc.abstractmethod
def _visitHeader(self, obj):
pass
@abc.abstractmethod
def _visitSyscall(self, obj):
pass
@abc.abstractmethod
def _visitArgument(self, argno, obj):
pass
@abc.abstractmethod
def _prologue(self):
pass
@abc.abstractmethod
def _epilogue(self):
pass
def getSyscallFromLabel(self, label):
"""
Return the system call object that defines taint label "label", or None if
not found.
"""
return self.__taintmap.get(label, None)
def __updateTaintMap(self, sysobj):
"""
Update the taint labels map, associating labels defined by system call
object "sysobj" with the object itself.
"""
outlabels = [sysobj.taintlabel_retval, ]
outlabels.extend(sysobj.getTaintDefs())
for label in outlabels:
defobj = self.__taintmap.get(label, None)
assert defobj is None or defobj.idz == sysobj.idz
self.__taintmap[label] = sysobj
def visit(self, obj):
t = type(obj)
if t == trace.reader.TraceHeader:
s = self._visitHeader(obj)
elif t == trace.syscall.Syscall:
self.__updateTaintMap(obj)
s = self._visitSyscall(obj)
else:
assert False, "Unexpected object: %s" % t
self.__stream.write(s)
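# --- Illustrative sketch (not part of the original module) -------------------
# A concrete output module only needs to implement the five abstract hooks
# used by visit(); the minimal subclass below renders every visited object as
# a line of plain text. The class name and output format are assumptions made
# purely for illustration.
class TextOutputGenerator(OutputGenerator):
    def _prologue(self):
        return "# qtrace text dump\n"
    def _epilogue(self):
        return "# end of dump\n"
    def _visitHeader(self, obj):
        return "header: %s\n" % (obj, )
    def _visitSyscall(self, obj):
        return "syscall: %s\n" % (obj, )
    def _visitArgument(self, argno, obj):
        return "arg %d: %s\n" % (argno, obj)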
|
rpaleari/qtrace
|
tools/output/output.py
|
Python
|
gpl-2.0
| 1,867
|
[
"VisIt"
] |
d886698ce459141243ae6e80e81f286b9313ae84c83f943a249c3b6ea806653d
|
# Copyright 2014 NeuroData (http://neurodata.io)
#
#Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#RBTODO --- refactor other fields like ROI children
# e.g. Node children, Skeleton nodes, other TODOs in file
import StringIO
import tempfile
import numpy as np
import zlib
import h5py
import os
import cStringIO
import csv
import re
import json
import blosc
from PIL import Image
import MySQLdb
import itertools
from contextlib import closing
from libtiff import TIFF
from operator import sub, add
from libtiff import TIFFfile, TIFFimage
import webservices.restargs as restargs
from ndlib.ndtype import *
from spdb.spatialdb import SpatialDB
from ndproj.ndprojdb import NDProjectsDB
from ndproj.ndchannel import NDChannel
from ndproj.ndproject import NDProject
from ndramon import h5ann
from ndramon.annotation import *
from ndramon.ramondb import RamonDB
from ndproj import h5projinfo
from ndproj import jsonprojinfo
import mcfc
from ndlib.ndctypelib import filter_ctype_OMP
from spdb.ndcube.timecube8 import TimeCube8
import webservices.ndwsskel
from webservices.ndwsnifti import ingestNIFTI, queryNIFTI
from ndlib.windowcutout import windowCutout
from ndlib.ndtype import TIMESERIES_CHANNELS, IMAGE_CHANNELS, ANNOTATION_CHANNELS, NOT_PROPAGATED, UNDER_PROPAGATION, PROPAGATED, ND_dtypetonp, DTYPE_uint8, DTYPE_uint16, DTYPE_uint32, READONLY_TRUE, READONLY_FALSE
from webservices.ndwserror import NDWSError, IncorrectSyntaxError
import logging
logger = logging.getLogger("neurodata")
#RBTODO check all the zoom in zoom out and write unittests.
def cutout (image_args, ch, proj, db):
"""Build and Return a cube of data for the specified dimensions. This method is called by all of the more basic services to build the data. They then format and refine the output. """
# Perform argument processing
try:
rest_args = restargs.BrainRestArgs ()
rest_args.cutoutArgs(image_args, proj.datasetcfg)
except restargs.RESTArgsError as e:
logger.error("REST Arguments {} failed: {}".format(image_args, e))
raise NDWSError(str(e))
# Extract the relevant values
corner = rest_args.getCorner()
dim = rest_args.getDim()
resolution = rest_args.getResolution()
filterlist = rest_args.getFilter()
neariso = rest_args.getZScaling()
direct = rest_args.getDirect()
timerange = rest_args.getTimeRange()
windowrange = rest_args.getWindowRange()
# Perform the cutout
if timerange == None:
# support for 3-d cutouts
cube = db.cutout(ch, corner, dim, resolution, timerange=ch.default_time_range, neariso=neariso, direct=direct)
else:
# 4-d cutouts
cube = db.cutout(ch, corner, dim, resolution, timerange=timerange, neariso=neariso, direct=direct)
filterCube(ch, cube, filterlist)
if timerange==None:
# convert 4-d to 3-d here for now
cube.data = cube.data.reshape(cube.data.shape[1:])
# window range on cutout only when specified by argument -- no defaults for now
if windowrange!= None:
if ch.channel_datatype == 'float32':
windowrange = [float(x) for x in windowrange]
else:
windowrange = [int(x) for x in windowrange]
cbnew = TimeCube8 ( )
cbnew.data = window(cube.data, ch, window_range=windowrange)
return cbnew
else:
return cube
def filterCube(ch, cube, filterlist=None):
"""Call Filter on a cube"""
if ch.channel_type in ANNOTATION_CHANNELS and filterlist is not None:
cube.data = filter_ctype_OMP ( cube.data, filterlist )
elif filterlist is not None and ch.channel_type not in ANNOTATION_CHANNELS:
logger.error("Filter only possible for Annotation Channels")
raise NDWSError("Filter only possible for Annotation Channels")
def channelIterCutout(channels, imageargs, proj, db):
"""Create a numpy datacube array using data from the given channels."""
try:
# extract the first channel
channel_list = channels.split(',')
ch = proj.getChannelObj(channel_list[0])
# call cutout for first channel
channel_data = cutout( imageargs, ch, proj, db ).data
cubedata = np.zeros ( (len(channel_list),)+channel_data.shape[:], dtype=channel_data.dtype )
cubedata[0,:] = cutout(imageargs, ch, proj, db).data
# iterate from second to nth channel
for idx,channel_name in enumerate(channel_list[1:]):
if channel_name == '0':
continue
else:
ch = proj.getChannelObj(channel_name)
if ND_dtypetonp[ch.channel_datatype] == cubedata.dtype:
cubedata[idx+1,:] = cutout(imageargs, ch, proj, db).data
else:
logger.error("The cutout {} can only contain cutouts of one single Channel Type.".format())
raise NDWSError("The cutout {} can only contain cutouts of one single Channel Type.".format())
return cubedata
except Exception as e:
logger.error("{}".format(e))
raise NDWSError("{}".format(e))
def numpyZip ( chanargs, proj, db ):
"""Return a web readable Numpy Pickle zipped"""
try:
# argument of format channel/service/imageargs
m = re.match("([\w+,]+)/(\w+)/([\w\.,/-]+)$", chanargs)
[channels, service, imageargs] = [i for i in m.groups()]
except Exception as e:
logger.error("Arguments not in the correct format {}. {}".format(chanargs, e))
raise NDWSError("Arguments not in the correct format {}. {}".format(chanargs, e))
try:
cubedata = channelIterCutout(channels, imageargs, proj, db)
# Create the compressed cube
fileobj = cStringIO.StringIO ()
np.save ( fileobj, cubedata )
cdz = zlib.compress (fileobj.getvalue())
# Package the object as a Web readable file handle
fileobj = cStringIO.StringIO(cdz)
fileobj.seek(0)
return fileobj.read()
except Exception as e:
logger.error("{}".format(e))
raise NDWSError("{}".format(e))
def RAW ( chanargs, proj, db ):
"""Return a web readable raw binary representation (knossos format).
It's a simple binary representation with the multidimensional array being
converted into a byte array in C-style iteration over the matrix."""
try:
# argument of format channel/service/imageargs
m = re.match("([\w+,]+)/(\w+)/([\w+,/-]+)$", chanargs)
[channels, service, imageargs] = [i for i in m.groups()]
except Exception as e:
logger.error("Arguments not in the correct format {}. {}".format(chanargs, e))
raise NDWSError("Arguments not in the correct format {}. {}".format(chanargs, e))
try:
cubedata = channelIterCutout(channels, imageargs, proj, db)
binary_representation = cubedata.tobytes("C")
return binary_representation
except Exception as e:
logger.error("{}".format(e))
raise NDWSError("{}".format(e))
def JPEG ( chanargs, proj, db ):
"""Return a web readable JPEG File"""
try:
# argument of format channel/service/imageargs
m = re.match("([\w+,]+)/(\w+)/([\w+,/-]+)$", chanargs)
[channels, service, imageargs] = [i for i in m.groups()]
except Exception as e:
logger.error("Arguments not in the correct format {}. {}".format(chanargs, e))
raise NDWSError("Arguments not in the correct format {}. {}".format(chanargs, e))
try:
ch = proj.getChannelObj(channels.split(',')[0])
cubedata = channelIterCutout(channels, imageargs, proj, db)
xdim, ydim, zdim = cubedata[0,:,:,:].shape[::-1]
#cubedata = np.swapaxes(cubedata[0,:,:,:], 0,2).reshape(xdim*zdim, ydim)
cubedata = cubedata[0,:,:,:].reshape(ydim*zdim, xdim)
if ch.channel_datatype in DTYPE_uint16:
img = Image.fromarray(cubedata, mode='I;16')
img = img.point(lambda i:i*(1./256)).convert('L')
elif ch.channel_datatype in DTYPE_uint32:
img = Image.fromarray(cubedata, mode='RGBA')
else:
img = Image.fromarray(cubedata)
fileobj = cStringIO.StringIO ()
img.save ( fileobj, "JPEG" )
fileobj.seek(0)
return fileobj.read()
except Exception as e:
logger.error("{}".format(e))
raise NDWSError("{}".format(e))
def BLOSC ( chanargs, proj, db ):
"""Return a web readable blosc file"""
try:
# argument of format channel/service/imageargs
m = re.match("([\w+,]+)/(\w+)/([\w+,/-]+)$", chanargs)
[channels, service, imageargs] = [i for i in m.groups()]
except Exception as e:
logger.error("Arguments not in the correct format {}. {}".format(chanargs, e))
raise NDWSError("Arguments not in the correct format {}. {}".format(chanargs, e))
try:
cubedata = channelIterCutout(channels, imageargs, proj, db)
# Create the compressed cube
return blosc.pack_array(cubedata)
except Exception as e:
logger.error("{}".format(e))
raise NDWSError("{}".format(e))
def binZip ( chanargs, proj, db ):
"""Return a web readable Numpy Pickle zipped"""
try:
# argument of format channel/service/imageargs
m = re.match("([\w+,]+)/(\w+)/([\w+,/-]+)$", chanargs)
[channels, service, imageargs] = [i for i in m.groups()]
except Exception as e:
logger.error("Arguments not in the correct format {}. {}".format(chanargs, e))
raise NDWSError("Arguments not in the correct format {}. {}".format(chanargs, e))
try:
cubedata = channelIterCutout(channels, imageargs, proj, db)
# Create the compressed cube
cdz = zlib.compress (cubedata.tostring())
# Package the object as a Web readable file handle
fileobj = cStringIO.StringIO(cdz)
fileobj.seek(0)
return fileobj.read()
  except Exception as e:
logger.error("{}".format(e))
raise NDWSError("{}".format(e))
def HDF5(chanargs, proj, db):
"""Return a web readable HDF5 file"""
# Create an in-memory HDF5 file
tmpfile = tempfile.NamedTemporaryFile()
fh5out = h5py.File(tmpfile.name, driver='core', backing_store=True)
try:
# argument of format channel/service/imageargs
m = re.match("([\w+,]+)/(\w+)/([\w+,/-]+)$", chanargs)
[channels, service, imageargs] = [i for i in m.groups()]
except Exception as e:
logger.error("Arguments not in the correct format {}. {}".format(chanargs, e))
raise IncorrectSyntaxError("Arguments not in the correct format {}. {}".format(chanargs, e))
try:
for channel_name in channels.split(','):
ch = proj.getChannelObj(channel_name)
cube = cutout(imageargs, ch, proj, db)
changrp = fh5out.create_group( "{}".format(channel_name) )
changrp.create_dataset("CUTOUT", tuple(cube.data.shape), cube.data.dtype, compression='gzip', data=cube.data.reshape(cube.data.shape))
changrp.create_dataset("CHANNELTYPE", (1,), dtype=h5py.special_dtype(vlen=str), data=ch.channel_type)
changrp.create_dataset("DATATYPE", (1,), dtype=h5py.special_dtype(vlen=str), data=ch.channel_datatype)
fh5out.close()
tmpfile.seek(0)
return tmpfile.read()
except Exception as e:
fh5out.close()
tmpfile.close()
logger.error("{}".format(e))
raise NDWSError("{}".format(e))
def postTiff3d ( channel, postargs, proj, db, postdata ):
"""Upload a tiff to the database"""
# get the channel
ch = proj.getChannelObj(channel)
if ch.channel_datatype in DTYPE_uint8:
datatype=np.uint8
elif ch.channel_datatype in DTYPE_uint16:
datatype=np.uint16
elif ch.channel_datatype in DTYPE_uint32:
datatype=np.uint32
else:
logger.error("Unsupported data type for TIFF3d post. {}".format(ch.channel_datatype))
raise NDWSError ("Unsupported data type for TIFF3d post. {}".format(ch.channel_datatype))
# parse the args
resstr, xoffstr, yoffstr, zoffstr, rest = postargs.split('/',4)
resolution = int(resstr)
projoffset = proj.datasetcfg.get_offset(resolution)
xoff = int(xoffstr)-projoffset[0]
yoff = int(yoffstr)-projoffset[1]
zoff = int(zoffstr)-projoffset[2]
# RBTODO check that the offsets are legal
# read the tiff data into a cuboid
with closing (tempfile.NamedTemporaryFile()) as tmpfile:
tmpfile.write( postdata )
tmpfile.seek(0)
tif = TIFF.open(tmpfile.name)
# get tiff metadata
image_width = tif.GetField("ImageWidth")
image_length = tif.GetField("ImageLength")
# get a z batch -- how many slices per cube
zbatch = proj.datasetcfg.get_cubedim(resolution)[0]
dircount = 0
    dataarray = None
# read each one at a time
for image in tif.iter_images():
# allocate a batch every cubesize
if dircount % zbatch == 0:
dataarray = np.zeros((zbatch, image_length, image_width), dtype=datatype)
dataarray[dircount%zbatch,:,:] = image
dircount += 1
# if we have a full batch go ahead and ingest
if dircount % zbatch == 0:
corner = ( xoff, yoff, zoff+dircount-zbatch )
db.writeCuboid (ch, corner, resolution, dataarray)
# ingest any remaining data
corner = ( xoff, yoff, zoff+dircount-(dircount%zbatch) )
db.writeCuboid (ch, corner, resolution, dataarray[0:(dircount%zbatch),:,:])
def timeDiff ( chanargs, proj, db):
"""Return a 3d delta in time"""
try:
# argument of format channel/service/imageargs
m = re.match("([\w+,]+)/(\w+)/([\w+,/-]+)$", chanargs)
[channels, service, imageargs] = [i for i in m.groups()]
except Exception as e:
logger.error("Arguments not in the correct format {}. {}".format(chanargs, e))
raise NDWSError("Arguments not in the correct format {}. {}".format(chanargs, e))
try:
channel_list = channels.split(',')
ch = proj.getChannelObj(channel_list[0])
channel_data = cutout( imageargs, ch, proj, db ).data
channel_data = np.negative(np.diff(np.float32(channel_data), axis=0))
cubedata = np.zeros((len(channel_list),)+channel_data.shape, dtype=np.float32)
cubedata[0,:] = channel_data
# if one channel convert 3-d to 4-d array
for idx,channel_name in enumerate(channel_list[1:]):
if channel_name == '0':
continue
else:
ch = proj.getChannelObj(channel_name)
if ND_dtypetonp[ch.channel_datatype] == cubedata.dtype:
cubedata[idx+1,:] = np.diff(cutout(imageargs, ch, proj, db).data, axis=0)
else:
raise NDWSError("The npz cutout can only contain cutouts of one single Channel Type.")
# Create the compressed cube
return blosc.pack_array(cubedata)
except Exception as e:
raise NDWSError("{}".format(e))
def tiff3d ( chanargs, proj, db ):
"""Return a 3d tiff file"""
[channels, service, imageargs] = chanargs.split('/', 2)
# create a temporary tif file
tmpfile = tempfile.NamedTemporaryFile()
tif = TIFF.open(tmpfile.name, mode='w')
try:
for channel_name in channels.split(','):
ch = proj.getChannelObj(channel_name)
cube = cutout ( imageargs, ch, proj, db )
FilterCube ( imageargs, cube )
# RB -- I think this is a cutout format. So, let's not recolor.
# # if it's annotations, recolor
# if ch.channel_type in ndprojdb.ANNOTATION_CHANNELS:
#
# imagemap = np.zeros ( (cube.data.shape[0]*cube.data.shape[1], cube.data.shape[2]), dtype=np.uint32 )
#
# # turn it into a 2-d array for recolor -- maybe make a 3-d recolor
# recolor_cube = recolor_ctype( cube.data.reshape((cube.data.shape[0]*cube.data.shape[1], cube.data.shape[2])), imagemap )
#
# # turn it back into a 4-d array RGBA
# recolor_cube = recolor_cube.view(dtype=np.uint8).reshape((cube.data.shape[0],cube.data.shape[1],cube.data.shape[2], 4 ))
#
# for i in range(recolor_cube.shape[0]):
# tif.write_image(recolor_cube[i,:,:,0:3], write_rgb=True)
#
# else:
tif.write_image(cube.data)
except:
tif.close()
tmpfile.close()
raise
tif.close()
tmpfile.seek(0)
return tmpfile.read()
def FilterCube ( imageargs, cb ):
""" Return a cube with the filtered ids """
# Filter Function - used to filter
result = re.search ("filter/([\d/,]+)/",imageargs)
if result != None:
filterlist = np.array ( result.group(1).split(','), dtype=np.uint32 )
cb.data = filter_ctype_OMP ( cb.data, filterlist )
def window(data, ch, window_range=None ):
"""Performs a window transformation on the cutout area
window always returns 8-bit data.
Careful how you use it. load target data into timeseriescube8.
"""
if window_range is None:
window_range = ch.window_range
[startwindow, endwindow] = window_range
# KL TODO window with signed channels -a to +b
if (startwindow == endwindow == 0):
return np.uint8(data)
elif endwindow!=0:
data = windowCutout (data, window_range)
return np.uint8(data)
return data
def imgSlice(webargs, proj, db):
"""Return the cube object for any plane xy, yz, xz"""
try:
# argument of format channel/service/resolution/cutoutargs
# cutoutargs can be window|filter/value,value/
m = re.match("(\w+)/(xy|yz|xz)/(\d+)/([\d+,/]+)?(.*)?$", webargs)
[channel, service, resolution, imageargs] = [i for i in m.groups()[:-1]]
imageargs = resolution + '/' + imageargs
extra_args = m.groups()[-1]
except Exception as e:
logger.error("Incorrect arguments for imgSlice {}. {}".format(webargs, e))
raise NDWSError("Incorrect arguments for imgSlice {}. {}".format(webargs, e))
try:
# Rewrite the imageargs to be a cutout
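    # e.g. for the xy service, "1/0,512/0,512/3/" becomes "1/0,512/0,512/3,4/"
    # (illustrative values: the single plane index is expanded to a one-slice range)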
if service == 'xy':
m = re.match("(\d+/\d+,\d+/\d+,\d+/)(\d+)/(\d+)?[/]?$", imageargs)
if m.group(3) is None:
cutoutargs = '{}{},{}/'.format(m.group(1), m.group(2), int(m.group(2))+1)
else:
cutoutargs = '{}{},{}/{},{}/'.format(m.group(1), m.group(2), int(m.group(2))+1, m.group(3), int(m.group(3))+1)
elif service == 'xz':
m = re.match("(\d+/\d+,\d+/)(\d+)(/\d+,\d+)/(\d+)?[/]?", imageargs)
if m.group(4) is None:
cutoutargs = '{}{},{}{}/'.format(m.group(1), m.group(2), int(m.group(2))+1, m.group(3))
else:
cutoutargs = '{}{},{}{}/{},{}/'.format(m.group(1), m.group(2), int(m.group(2))+1, m.group(3), m.group(4), int(m.group(4))+1)
elif service == 'yz':
m = re.match("(\d+/)(\d+)(/\d+,\d+/\d+,\d+)/(\d+)?[/]?", imageargs)
if m.group(4) is None:
cutoutargs = '{}{},{}{}/'.format(m.group(1), m.group(2), int(m.group(2))+1, m.group(3))
else:
cutoutargs = '{}{},{}{}/{},{}/'.format(m.group(1), m.group(2), int(m.group(2))+1, m.group(3), m.group(4), int(m.group(4))+1)
else:
raise "No such image plane {}".format(service)
except Exception as e:
logger.error ("Illegal image arguments={}. Error={}".format(imageargs,e))
raise NDWSError ("Illegal image arguments={}. Error={}".format(imageargs,e))
cutoutargs = cutoutargs + extra_args
# Perform the cutout
ch = proj.getChannelObj(channel)
cb = cutout(cutoutargs, ch, proj, db)
# perform default window if not specified
if not re.search("window", extra_args) and (cb.data.dtype == np.uint16 or cb.data.dtype == np.float32):
cbnew = TimeCube8 ( )
cbnew.data = window ( cb.data, ch )
return cbnew
else:
return cb
def imgPNG (proj, webargs, cb):
"""Return a png object for any plane"""
try:
# argument of format channel/service/resolution/cutoutargs
# cutoutargs can be window|filter/value,value/
m = re.match("(\w+)/(xy|yz|xz)/(\d+)/([\d+,/]+)(.*)?$", webargs)
[channel, service, resolution, imageargs] = [i for i in m.groups()[:-1]]
except Exception as e:
logger.error("Incorrect arguments for imgSlice {}. {}".format(webargs, e))
raise NDWSError("Incorrect arguments for imgSlice {}. {}".format(webargs, e))
# window argument
result = re.search (r"/window/([\d\.]+),([\d\.]+)/", webargs)
if result != None:
window = [str(i) for i in result.groups()]
else:
window = None
if service == 'xy':
img = cb.xyImage(window=window)
elif service == 'yz':
img = cb.yzImage(proj.datasetcfg.scale[int(resolution)][service])
elif service == 'xz':
img = cb.xzImage(proj.datasetcfg.scale[int(resolution)][service])
fileobj = cStringIO.StringIO ( )
img.save ( fileobj, "PNG" )
fileobj.seek(0)
return fileobj.read()
#
# Read individual annotation image slices xy, xz, yz
#
def imgAnno ( service, chanargs, proj, db, rdb ):
"""Return a plane fileobj.read() for a single objects"""
[channel, service, annoidstr, imageargs] = chanargs.split('/', 3)
ch = NDChannel.fromName(proj, channel)
annoids = [int(x) for x in annoidstr.split(',')]
# retrieve the annotation
if len(annoids) == 1:
anno = rdb.getAnnotation ( ch, annoids[0] )
if anno == None:
logger.error("No annotation found at identifier = {}".format(annoids[0]))
raise NDWSError ("No annotation found at identifier = {}".format(annoids[0]))
else:
      iscompound = anno.__class__ in [AnnNeuron]
else:
iscompound = False
try:
# Rewrite the imageargs to be a cutout
if service == 'xy':
m = re.match("(\d+/\d+,\d+/\d+,\d+/)(\d+)/", imageargs)
cutoutargs = '{}{},{}/'.format(m.group(1),m.group(2),int(m.group(2))+1)
elif service == 'xz':
m = re.match("(\d+/\d+,\d+/)(\d+)(/\d+,\d+)/", imageargs)
cutoutargs = '{}{},{}{}/'.format(m.group(1),m.group(2),int(m.group(2))+1,m.group(3))
elif service == 'yz':
      m = re.match("(\d+/)(\d+)(/\d+,\d+/\d+,\d+)/", imageargs)
cutoutargs = '{}{},{}{}/'.format(m.group(1),m.group(2),int(m.group(2))+1,m.group(3))
else:
raise "No such image plane {}".format(service)
except Exception as e:
logger.error ("Illegal image arguments={}. Error={}".format(imageargs,e))
raise NDWSError ("Illegal image arguments={}. Error={}".format(imageargs,e))
# Perform argument processing
try:
args = restargs.BrainRestArgs ();
args.cutoutArgs ( cutoutargs, proj.datasetcfg )
  except restargs.RESTArgsError as e:
    logger.error("REST Arguments {} failed: {}".format(chanargs, e))
raise NDWSError(e.value)
  # Extract the relevant values
  corner = args.getCorner()
  dim = args.getDim()
  resolution = args.getResolution()
  # timestamp is not parsed from the arguments yet; default to 0 (as in annId)
  timestamp = 0
# determine if it is a compound type (NEURON) and get the list of relevant segments
if iscompound:
# remap the ids for a neuron
dataids = rdb.getSegments ( ch, annoids[0] )
cb = db.annoCutout ( ch, dataids, timestamp, resolution, corner, dim, annoids[0] )
else:
# no remap when not a neuron
dataids = annoids
cb = db.annoCutout ( ch, dataids, timestamp, resolution, corner, dim, None)
# reshape to 2-d
if service == 'xy':
img = cb.xyImage ( )
elif service == 'xz':
img = cb.xzImage ( proj.datasetcfg.zscale[resolution] )
elif service == 'yz':
img = cb.yzImage ( proj.datasetcfg.zscale[resolution] )
fileobj = cStringIO.StringIO ( )
img.save ( fileobj, "PNG" )
fileobj.seek(0)
return fileobj.read()
def annId ( chanargs, proj, db ):
"""Return the annotation identifier of a voxel"""
# RBTODO timestamp should be in args 0 for now.
timestamp = 0
[channel, service, imageargs] = chanargs.split('/',2)
ch = NDChannel.fromName(proj, channel)
# Perform argument processing
(resolution, voxel) = restargs.voxel(imageargs, proj.datasetcfg)
# Get the identifier
return db.getVoxel(ch, timestamp, resolution, voxel)
def listIds ( chanargs, proj, db ):
"""Return the list of annotation identifiers in a region"""
[channel, service, imageargs] = chanargs.split('/', 2)
ch = NDChannel.fromName(proj,channel)
# Perform argument processing
try:
args = restargs.BrainRestArgs ();
args.cutoutArgs ( imageargs, proj.datasetcfg )
except restargs.RESTArgsError as e:
logger.error("REST Arguments {} failed: {}".format(imageargs,e))
raise NDWSError("REST Arguments {} failed: {}".format(imageargs,e))
# Extract the relevant values
corner = args.getCorner()
dim = args.getDim()
resolution = args.getResolution()
cb = db.cutout ( ch, corner, dim, resolution )
ids = np.unique(cb.data)
  # drop the background identifier (0) and return a comma-separated string
  return ', '.join(str(i) for i in ids if i != 0)
def selectService ( service, webargs, proj, db ):
"""Select the service and pass on the arguments to the appropiate function."""
if service in ['xy','yz','xz']:
return imgPNG(proj, webargs, imgSlice (webargs, proj, db))
elif service == 'hdf5':
return HDF5 ( webargs, proj, db )
elif service == 'tiff':
return tiff3d ( webargs, proj, db )
elif service in ['npz']:
return numpyZip ( webargs, proj, db )
elif service in ['blosc']:
return BLOSC ( webargs, proj, db )
elif service in ['raw']:
return RAW ( webargs, proj, db )
elif service in ['jpeg']:
return JPEG ( webargs, proj, db )
elif service in ['zip']:
return binZip ( webargs, proj, db )
elif service == 'id':
return annId ( webargs, proj, db )
elif service == 'ids':
return listIds ( webargs, proj, db )
elif service == 'diff':
return timeDiff ( webargs, proj, db )
  elif service in ['xzanno', 'yzanno', 'xyanno']:
    # imgAnno also needs a handle to the RAMON database
    with closing (RamonDB(proj)) as rdb:
      return imgAnno ( service.strip('anno'), webargs, proj, db, rdb )
else:
logger.error("An illegal Web GET service was requested {}. Args {}".format(service, webargs))
raise NDWSError("An illegal Web GET service was requested {}. Args {}".format(service, webargs))
def selectPost ( webargs, proj, db, postdata ):
"""Identify the service and pass on the arguments to the appropiate service."""
[channel, service, postargs] = webargs.split('/', 2)
# Create a list of channels from the comma separated argument
channel_list = channel.split(',')
  # Retry in case the database is busy
tries = 0
done = False
# Process the arguments
try:
rest_args = restargs.BrainRestArgs ();
rest_args.cutoutArgs ( postargs, proj.datasetcfg )
except restargs.RESTArgsError as e:
logger.error( "REST Arguments {} failed: {}".format(postargs,e) )
raise NDWSError(e)
corner = rest_args.getCorner()
dimension = rest_args.getDim()
resolution = rest_args.getResolution()
timerange = rest_args.getTimeRange()
neariso = rest_args.getZScaling()
direct = rest_args.getDirect()
conflictopt = restargs.conflictOption ( "" )
while not done and tries < 5:
try:
# if it's a 3d tiff treat differently. No cutout args.
if service == 'tiff':
return postTiff3d ( channel, postargs, proj, db, postdata )
elif service == 'blaze':
for channel_name in channel_list:
ch = proj.getChannelObj(channel_name)
db.writeBlazeCuboid(ch, corner, resolution, postdata, timerange=timerange)
elif service == 'hdf5':
# Get the HDF5 file.
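        # Expected layout of the posted file (one group per channel, based on
        # the fields read below): /<channel>/CUTOUT, /<channel>/DATATYPE and
        # /<channel>/CHANNELTYPE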
with closing (tempfile.NamedTemporaryFile ( )) as tmpfile:
tmpfile.write ( postdata )
tmpfile.seek(0)
h5f = h5py.File ( tmpfile.name, driver='core', backing_store=False )
for channel_name in channel_list:
ch = proj.getChannelObj(channel_name)
chgrp = h5f.get(ch.channel_name)
voxarray = chgrp['CUTOUT'].value
h5_datatype = h5f.get(ch.channel_name)['DATATYPE'].value[0]
h5_channeltype = h5f.get(ch.channel_name)['CHANNELTYPE'].value[0]
# h5xyzoffset = chgrp.get('XYZOFFSET')
# h5resolution = chgrp.get('RESOLUTION')[0]
# Checking the datatype of the voxarray
if voxarray.dtype != ND_dtypetonp[ch.channel_datatype]:
logger.error("Channel datatype {} in the HDF5 file does not match with the {} in the database.".format(h5_datatype, ch.channel_datatype))
raise NDWSError("Channel datatype {} in the HDF5 file does not match with the {} in the database.".format(h5_datatype, ch.channel_datatype))
# Don't write to readonly channels
if ch.readonly == READONLY_TRUE:
logger.error("Attempt to write to read only channel {} in project. Web Args:{}".format(ch.channel_name, proj.project_name, webargs))
raise NDWSError("Attempt to write to read only channel {} in project. Web Args: {}".format(ch.channel_name, proj.project_name, webargs))
# reshape the data to 4d if no timerange
if timerange == None:
voxarray = voxarray.reshape((1,voxarray.shape[0],voxarray.shape[1],voxarray.shape[2]))
efftimerange = ch.default_time_range
else:
efftimerange = timerange
# checking if the dimension for x,y,z,t(optional) are correct
            # this is different from the one for blosc/numpy because channels are packed separately
if voxarray.shape[::-1] != tuple(dimension + [efftimerange[1]-efftimerange[0]]):
logger.error("The data has mismatched dimensions {} compared to the arguments {}".format(voxarray.shape[1:], dimension))
raise NDWSError("The data has mismatched dimensions {} compared to the arguments {}".format(voxarray.shape[1:], dimension))
if ch.channel_type in ANNOTATION_CHANNELS:
db.annotateDense ( ch, efftimerange[0], corner, resolution, voxarray, conflictopt)
else:
db.writeCuboid (ch, corner, resolution, voxarray, timerange=efftimerange, neariso=neariso, direct=direct)
h5f.flush()
h5f.close()
# other services take cutout args
elif service in ['npz', 'blosc']:
# get the data out of the compressed blob
if service == 'npz':
rawdata = zlib.decompress ( postdata )
fileobj = cStringIO.StringIO ( rawdata )
voxarray = np.load ( fileobj )
elif service == 'blosc':
voxarray = blosc.unpack_array(postdata)
if voxarray.shape[0] != len(channel_list):
logger.error("The data has some missing channels")
raise NDWSError("The data has some missing channels")
# reshape the data to 4d if no timerange
if timerange is None:
voxarray = voxarray.reshape((voxarray.shape[0],1,voxarray.shape[1],voxarray.shape[2],voxarray.shape[3]))
# need to create a temporary channel here for fetching timerange
ch = proj.getChannelObj(channel_list[0])
efftimerange = ch.default_time_range
else:
efftimerange = timerange
# checking if the dimension for x,y,z,t(optional) are correct
if voxarray.shape[1:][::-1] != tuple(dimension + [efftimerange[1]-efftimerange[0]]):
logger.error("The data has mismatched dimensions {} compared to the arguments {}".format(voxarray.shape[1:], dimension))
raise NDWSError("The data has mismatched dimensions {} compared to the arguments {}".format(voxarray.shape[1:], dimension))
for idx, channel_name in enumerate(channel_list):
ch = proj.getChannelObj(channel_name)
# Don't write to readonly channels
if ch.readonly == READONLY_TRUE:
logger.error("Attempt to write to read only channel {} in project. Web Args:{}".format(ch.channel_name, proj.project_name, webargs))
raise NDWSError("Attempt to write to read only channel {} in project. Web Args: {}".format(ch.channel_name, proj.project_name, webargs))
if not voxarray.dtype == ND_dtypetonp[ch.channel_datatype]:
logger.error("Wrong datatype in POST")
raise NDWSError("Wrong datatype in POST")
if ch.channel_type in ANNOTATION_CHANNELS:
db.annotateDense(ch, efftimerange[0], corner, resolution, voxarray[idx,:], conflictopt )
else:
db.writeCuboid(ch, corner, resolution, voxarray[idx,:], efftimerange, neariso=neariso, direct=direct)
else:
logger.error("An illegal Web POST service was requested: {}. Args {}".format(service, webargs))
raise NDWSError("An illegal Web POST service was requested: {}. Args {}".format(service, webargs))
done = True
# rollback if you catch an error
    except MySQLdb.OperationalError as e:
logger.warning("Transaction did not complete. {}".format(e))
tries += 1
continue
    except MySQLdb.Error as e:
logger.error("POST transaction rollback. {}".format(e))
raise NDWSError("POST transaction rollback. {}".format(e))
    except Exception as e:
logger.exception("POST transaction rollback. {}".format(e))
raise NDWSError("POST transaction rollback. {}".format(e))
def getCutout ( webargs ):
"""Interface to the cutout service for annotations.Load the annotation project and invoke the appropriate dataset."""
#[ token, sym, rangeargs ] = webargs.partition ('/')
[token, webargs] = webargs.split('/', 1)
[channel, service, chanargs] = webargs.split('/', 2)
# get the project
proj = NDProject.fromTokenName(token)
# and the database and then call the db function
with closing (SpatialDB(proj)) as db:
return selectService ( service, webargs, proj, db )
def postCutout ( webargs, postdata ):
"""Interface to the write cutout data. Load the annotation project and invoke the appropriate dataset"""
[ token, rangeargs ] = webargs.split('/',1)
# get the project
proj = NDProject.fromTokenName(token)
# and the database and then call the db function
with closing (SpatialDB(proj)) as db:
return selectPost ( rangeargs, proj, db, postdata )
################# RAMON interfaces #######################
"""An enumeration for options processing in getAnnotation"""
AR_NODATA = 0
AR_VOXELS = 1
AR_CUTOUT = 2
AR_TIGHTCUTOUT = 3
AR_BOUNDINGBOX = 4
AR_CUBOIDS = 5
def getAnnoDictById ( ch, annoid, proj, rdb ):
"""Retrieve the annotation and return it as a Python dictionary"""
# retrieve the annotation
anno = rdb.getAnnotation ( ch, annoid )
if anno == None:
logger.error("No annotation found at identifier = %s" % (annoid))
raise NDWSError ("No annotation found at identifier = %s" % (annoid))
# the json interface returns anno_id -> dictionary containing annotation info
tmpdict = {
annoid: anno.toDict()
}
# return dictionary
return tmpdict
def getAnnoById ( ch, annoid, h5f, proj, rdb, db, dataoption, timestamp, resolution=None, corner=None, dim=None ):
"""Retrieve the annotation and put it in the HDF5 file."""
# retrieve the annotation
anno = rdb.getAnnotation ( ch, annoid )
if anno == None:
logger.error("No annotation found at identifier = %s" % (annoid))
raise NDWSError ("No annotation found at identifier = %s" % (annoid))
# create the HDF5 object
h5anno = h5ann.AnnotationtoH5 ( anno, h5f )
# only return data for annotation types that have data
if anno.__class__ in [AnnSeed] and dataoption != AR_NODATA:
logger.error("No data associated with annotation type %s" % ( anno.__class__))
raise NDWSError ("No data associated with annotation type %s" % ( anno.__class__))
# determine if it is a compound type (NEURON) and get the list of relevant segments
if anno.__class__ in [AnnNeuron] and dataoption != AR_NODATA:
dataids = rdb.getSegments ( ch, annoid )
else:
dataids = [anno.annid]
# get the voxel data if requested
if dataoption == AR_VOXELS:
# RBTODO Need to make voxels zoom
allvoxels = []
# add voxels for all of the ids
for dataid in dataids:
voxlist = db.getLocations(ch, dataid, timestamp, resolution )
if len(voxlist) != 0:
allvoxels = allvoxels + voxlist
allvoxels = [ el for el in set ( [ tuple(t) for t in allvoxels ] ) ]
h5anno.addVoxels ( resolution, allvoxels )
# support list of IDs to filter cutout
elif dataoption == AR_CUTOUT:
# cutout the data with the and remap for neurons.
if anno.__class__ in [AnnNeuron] and dataoption != AR_NODATA:
cb = db.annoCutout(ch, dataids, timestamp, resolution, corner, dim, annoid )
else:
# don't need to remap single annotations
cb = db.annoCutout(ch, dataids, timestamp, resolution, corner, dim, None )
# again an abstraction problem with corner. return the corner to cutout arguments space
offset = proj.datasetcfg.get_offset(resolution)
retcorner = [corner[0]+offset[0], corner[1]+offset[1], corner[2]+offset[2]]
h5anno.addCutout ( resolution, retcorner, cb.data.reshape(cb.data.shape[1:]))
elif dataoption == AR_TIGHTCUTOUT:
# determine if it is a compound type (NEURON) and get the list of relevant segments
if anno.__class__ in [AnnNeuron] and dataoption != AR_NODATA:
dataids = rdb.getSegments(ch, annoid)
else:
dataids = [anno.annid]
# get the bounding box from the index
bbcorner, bbdim = db.getBoundingCube(ch, dataids, timestamp, resolution )
# figure out which ids are in object
if bbcorner != None:
if bbdim[0]*bbdim[1]*bbdim[2] >= 1024*1024*256:
logger.error ("Cutout region is inappropriately large. Dimension: %s,%s,%s" % (bbdim[0],bbdim[1],bbdim[2]))
raise NDWSError ("Cutout region is inappropriately large. Dimension: %s,%s,%s" % (bbdim[0],bbdim[1],bbdim[2]))
# Call the cuboids interface to get the minimum amount of data
if anno.__class__ == AnnNeuron:
offsets = db.annoCubeOffsets(ch, dataids, timestamp, resolution, annoid )
else:
offsets = db.annoCubeOffsets(ch, [annoid], timestamp, resolution)
datacuboid = None
# get a list of indexes in XYZ space
# for each cube in the index, add it to the data cube
for (offset,cbdata) in offsets:
if datacuboid == None:
datacuboid = np.zeros ( (bbdim[2],bbdim[1],bbdim[0]), dtype=cbdata.dtype )
datacuboid [ offset[2]-bbcorner[2]:offset[2]-bbcorner[2]+cbdata.shape[1], offset[1]-bbcorner[1]:offset[1]-bbcorner[1]+cbdata.shape[2], offset[0]-bbcorner[0]:offset[0]-bbcorner[0]+cbdata.shape[3] ] = cbdata [0,:,:,:]
offset = proj.datasetcfg.get_offset(resolution)
bbcorner = map(add, bbcorner, offset)
h5anno.addCutout ( resolution, bbcorner, datacuboid )
elif dataoption == AR_BOUNDINGBOX:
# determine if it is a compound type (NEURON) and get the list of relevant segments
if anno.__class__ in [AnnNeuron] and dataoption != AR_NODATA:
dataids = rdb.getSegments(ch, annoid)
else:
dataids = [anno.annid]
bbcorner, bbdim = db.getBoundingBox(ch, dataids, resolution)
h5anno.addBoundingBox(resolution, bbcorner, bbdim)
# populate with a minimal list of cuboids
elif dataoption == AR_CUBOIDS:
#CUBOIDS don't work at zoom resolution
h5anno.mkCuboidGroup(resolution)
if anno.__class__ == AnnNeuron:
offsets = db.annoCubeOffsets(ch, dataids, resolution, annoid)
else:
offsets = db.annoCubeOffsets(ch, [annoid], resolution)
# get a list of indexes in XYZ space
# for each cube in the index, add it to the hdf5 file
for (offset,cbdata) in offsets:
h5anno.addCuboid ( offset, cbdata )
def getAnnotation ( webargs ):
"""Fetch a RAMON object as HDF5 by object identifier"""
[token, channel, otherargs] = webargs.split('/', 2)
# pattern for using contexts to close databases
# get the project
proj = NDProject.fromTokenName(token)
# and the database and then call the db function
with closing (SpatialDB(proj)) as db:
with closing (RamonDB(proj)) as rdb:
# Split the URL and get the args
ch = NDChannel.fromName(proj, channel)
option_args = otherargs.split('/', 2)
# AB Added 20151011
      # Check to see if this is a JSON request; if so, return the JSON objects, otherwise continue with returning the HDF5 data
# RBTODO add timestamp to json?
if option_args[1] == 'json':
annobjs = {}
try:
if re.match ( '^[\d,]+$', option_args[0] ):
annoids = map(int, option_args[0].split(','))
for annoid in annoids:
annobjs.update(getAnnoDictById ( ch, annoid, proj, rdb ))
jsonstr = json.dumps( annobjs )
        except Exception as e:
logger.error("Error: {}".format(e))
raise NDWSError("Error: {}".format(e))
return jsonstr
# not a json request, continue with building and returning HDF5 file
# Make the HDF5 file
# Create an in-memory HDF5 file
tmpfile = tempfile.NamedTemporaryFile()
h5f = h5py.File ( tmpfile.name,"w" )
# RBTODO get the timestamp or timerange from the HDF5 file
timestamp = 0
try:
# if the first argument is numeric. it is an annoid
if re.match ( '^[\d,]+$', option_args[0] ):
annoids = map(int, option_args[0].split(','))
for annoid in annoids:
            # if it's a compound data type (NEURON) get the list of data ids
# default is no data
if option_args[1] == '' or option_args[1] == 'nodata':
dataoption = AR_NODATA
getAnnoById ( ch, annoid, h5f, proj, rdb, db, dataoption, timestamp )
# if you want voxels you either requested the resolution id/voxels/resolution
# or you get data from the default resolution
elif option_args[1] == 'voxels':
dataoption = AR_VOXELS
try:
[resstr, sym, rest] = option_args[2].partition('/')
resolution = int(resstr)
except:
logger.error("Improperly formatted voxel arguments {}".format(option_args[2]))
raise NDWSError("Improperly formatted voxel arguments {}".format(option_args[2]))
getAnnoById ( ch, annoid, h5f, proj, rdb, db, dataoption, timestamp, resolution )
# or you get data from the default resolution
elif option_args[1] == 'cuboids':
dataoption = AR_CUBOIDS
try:
[resstr, sym, rest] = option_args[2].partition('/')
resolution = int(resstr)
except:
logger.error("Improperly formatted cuboids arguments {}".format(option_args[2]))
raise NDWSError("Improperly formatted cuboids arguments {}".format(option_args[2]))
getAnnoById ( ch, annoid, h5f, proj, rdb, db, dataoption, timestamp, resolution )
elif option_args[1] =='cutout':
# if there are no args or only resolution, it's a tight cutout request
if option_args[2] == '' or re.match('^\d+[\w\/]*$', option_args[2]):
dataoption = AR_TIGHTCUTOUT
try:
[resstr, sym, rest] = option_args[2].partition('/')
resolution = int(resstr)
except:
logger.error ( "Improperly formatted cutout arguments {}".format(option_args[2]))
raise NDWSError("Improperly formatted cutout arguments {}".format(option_args[2]))
getAnnoById ( ch, annoid, h5f, proj, rdb, db, dataoption, timestamp, resolution )
else:
dataoption = AR_CUTOUT
# Perform argument processing
brargs = restargs.BrainRestArgs ();
brargs.cutoutArgs ( option_args[2], proj.datasetcfg )
# Extract the relevant values
corner = brargs.getCorner()
dim = brargs.getDim()
resolution = brargs.getResolution()
getAnnoById ( ch, annoid, h5f, proj, rdb, db, dataoption, timestamp, resolution, corner, dim )
elif option_args[1] == 'boundingbox':
dataoption = AR_BOUNDINGBOX
try:
[resstr, sym, rest] = option_args[2].partition('/')
resolution = int(resstr)
except:
logger.error("Improperly formatted bounding box arguments {}".format(option_args[2]))
raise NDWSError("Improperly formatted bounding box arguments {}".format(option_args[2]))
getAnnoById ( ch, annoid, h5f, proj, rdb, db, dataoption, timestamp, resolution )
else:
logger.error ("Fetch identifier {}. Error: no such data option {}".format( annoid, option_args[1] ))
raise NDWSError ("Fetch identifier {}. Error: no such data option {}".format( annoid, option_args[1] ))
# the first argument is not numeric. it is a service other than getAnnotation
else:
logger.error("Get interface {} requested. Illegal or not implemented. Args: {}".format( option_args[0], webargs ))
raise NDWSError ("Get interface {} requested. Illegal or not implemented".format( option_args[0] ))
# Close the file on a error: it won't get closed by the Web server
except:
h5f.close()
raise
# Close the HDF5 file always
h5f.flush()
h5f.close()
# Return the HDF5 file
tmpfile.seek(0)
return tmpfile.read()
def getCSV ( webargs ):
"""Fetch a RAMON object as CSV. Always includes bounding box. No data option."""
[ token, csvliteral, annoid, reststr ] = webargs.split ('/',3)
# pattern for using contexts to close databases
# get the project
proj = NDProject.fromTokenName(token)
# and the database and then call the db function
with closing (SpatialDB(proj)) as db:
with closing (RamonDB(proj)) as rdb:
# Make the HDF5 file
# Create an in-memory HDF5 file
with closing (tempfile.NamedTemporaryFile()) as tmpfile:
h5f = h5py.File ( tmpfile.name )
try:
dataoption = AR_BOUNDINGBOX
try:
[resstr, sym, rest] = reststr.partition('/')
resolution = int(resstr)
except:
logger.error ( "Improperly formatted cutout arguments {}".format(reststr))
raise NDWSError("Improperly formatted cutout arguments {}".format(reststr))
getAnnoById ( annoid, h5f, proj, rdb, db, dataoption, timestamp, resolution )
# convert the HDF5 file to csv
csvstr = h5ann.h5toCSV ( h5f )
finally:
h5f.close()
return csvstr
def getAnnotations ( webargs, postdata ):
"""Get multiple annotations. Takes an HDF5 that lists ids in the post."""
[ token, objectsliteral, otherargs ] = webargs.split ('/',2)
proj = NDProject.fromTokenName(token)
with closing (SpatialDB(proj)) as db:
with closing (RamonDB(proj)) as rdb:
# Read the post data HDF5 and get a list of identifiers
tmpinfile = tempfile.NamedTemporaryFile ( )
tmpinfile.write ( postdata )
tmpinfile.seek(0)
h5in = h5py.File ( tmpinfile.name )
try:
# IDENTIFIERS
if not h5in.get('ANNOIDS'):
logger.error ("Requesting multiple annotations. But no HDF5 \'ANNOIDS\' field specified.")
raise NDWSError ("Requesting multiple annotations. But no HDF5 \'ANNOIDS\' field specified.")
# GET the data out of the HDF5 file. Never operate on the data in place.
annoids = h5in['ANNOIDS'][:]
# set variables to None: need them in call to getAnnoByID, but not all paths set all
corner = None
dim = None
resolution = None
dataarg = ''
# process options
# Split the URL and get the args
if otherargs != '':
( dataarg, cutout ) = otherargs.split('/', 1)
if dataarg =='' or dataarg == 'nodata':
dataoption = AR_NODATA
elif dataarg == 'voxels':
dataoption = AR_VOXELS
# only arg to voxels is resolution
try:
[resstr, sym, rest] = cutout.partition('/')
resolution = int(resstr)
except:
logger.error ( "Improperly formatted voxel arguments {}".format(cutout))
raise NDWSError("Improperly formatted voxel arguments {}".format(cutout))
elif dataarg == 'cutout':
          # if blank or just resolution then a tightcutout
if cutout == '' or re.match('^\d+[\/]*$', cutout):
dataoption = AR_TIGHTCUTOUT
try:
[resstr, sym, rest] = cutout.partition('/')
resolution = int(resstr)
except:
logger.error ( "Improperly formatted cutout arguments {}".format(cutout))
raise NDWSError("Improperly formatted cutout arguments {}".format(cutout))
else:
dataoption = AR_CUTOUT
# Perform argument processing
brargs = restargs.BrainRestArgs()
brargs.cutoutArgs(cutout, proj.datsetcfg)
# Extract the relevant values
corner = brargs.getCorner()
dim = brargs.getDim()
resolution = brargs.getResolution()
# RBTODO test this interface
elif dataarg == 'boundingbox':
          # if blank or just resolution then a tightcutout
if cutout == '' or re.match('^\d+[\/]*$', cutout):
dataoption = AR_BOUNDINGBOX
try:
[resstr, sym, rest] = cutout.partition('/')
resolution = int(resstr)
except:
logger.error ( "Improperly formatted bounding box arguments {}".format(cutout))
raise NDWSError("Improperly formatted bounding box arguments {}".format(cutout))
else:
logger.error ("In getAnnotations: Error: no such data option %s " % ( dataarg ))
raise NDWSError ("In getAnnotations: Error: no such data option %s " % ( dataarg ))
try:
# Make the HDF5 output file
# Create an in-memory HDF5 file
tmpoutfile = tempfile.NamedTemporaryFile()
h5fout = h5py.File ( tmpoutfile.name )
# get annotations for each identifier
for annoid in annoids:
# the int here is to prevent using a numpy value in an inner loop. This is a 10x performance gain.
getAnnoById ( int(annoid), h5fout, proj, rdb, db, dataoption, timestamp, resolution, corner, dim )
        except:
          h5fout.close()
          tmpoutfile.close()
          raise
finally:
# close temporary file
h5in.close()
tmpinfile.close()
# Transmit back the populated HDF5 file
h5fout.flush()
h5fout.close()
tmpoutfile.seek(0)
return tmpoutfile.read()
def putAnnotation ( webargs, postdata ):
"""Put a RAMON object as HDF5 (or JSON) by object identifier"""
[token, channel, optionsargs] = webargs.split('/',2)
proj = NDProject.fromTokenName(token)
ch = NDChannel.fromName(proj, channel)
if ch.channel_type not in ANNOTATION_CHANNELS:
logger.error("Channel {} does not support annotations".format(ch.channel_name))
raise NDWSError("Channel {} does not support annotations".format(ch.channel_name))
with closing (SpatialDB(proj)) as db:
with closing (RamonDB(proj)) as rdb:
# Don't write to readonly channels
if ch.readonly == READONLY_TRUE:
logger.error("Attempt to write to read only channel {} in project. Web Args:{}".format(ch.getChannelName(), proj.project_name, webargs))
raise NDWSError("Attempt to write to read only channel {} in project. Web Args: {}".format(ch.getChannelName(), proj.project_name, webargs))
# return string of id values
retvals = []
# check to see if we're doing a JSON post or HDF5 post
if 'json' in optionsargs.split('/'):
annobjdict = json.loads(postdata)
if len(annobjdict.keys()) != 1:
# for now we just accept a single annotation
logger.error("JSON post interface can only accept one annotation. Tried to post {}.".format(len(annobjdict.keys())))
raise NDWSError("JSON post interface can only accept one annotation. Tried to post {}.".format(len(annobjdict.keys())))
# create annotation object by type
annotype = annobjdict[ annobjdict.keys()[0] ]['ann_type']
if annotype == ANNO_ANNOTATION:
anno = Annotation( rdb, ch )
elif annotype == ANNO_SYNAPSE:
anno = AnnSynapse( rdb, ch )
elif annotype == ANNO_SEED:
anno = AnnSeed( rdb, ch )
elif annotype == ANNO_SEGMENT:
anno = AnnSegment( rdb, ch )
elif annotype == ANNO_NEURON:
anno = AnnNeuron( rdb, ch )
elif annotype == ANNO_ORGANELLE:
anno = AnnOrganelle( rdb, ch )
elif annotype == ANNO_NODE:
anno = AnnNode( rdb, ch )
elif annotype == ANNO_SKELETON:
anno = AnnSkeleton( rdb, ch )
elif annotype == ANNO_ROI:
anno = AnnROI( rdb, ch )
anno.fromDict( annobjdict[ annobjdict.keys()[0] ] )
# if the post is an update
if 'update' in optionsargs.split('/'):
rdb.putAnnotation(ch, anno, 'update')
else:
# set the ID (if provided)
anno.setField('annid', (rdb.assignID(ch,anno.annid)))
# ABTODO not taking any options? need to define
options = []
# Put into the database
rdb.putAnnotation(ch, anno, options)
retvals.append(anno.annid)
retstr = ','.join(map(str, retvals))
# return the identifier
return retstr
else:
# Make a named temporary file for the HDF5
with closing (tempfile.NamedTemporaryFile()) as tmpfile:
tmpfile.write ( postdata )
tmpfile.seek(0)
h5f = h5py.File ( tmpfile.name, driver='core', backing_store=False )
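# Note (added for clarity): the HDF5 payload parsed below is expected to contain one
# top-level group per annotation id; each group may carry RESOLUTION, VOXELS (Nx3),
# CUTOUT plus XYZOFFSET, and/or CUBOIDS datasets, as read further down.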
# get the conflict option if it exists
options = optionsargs.split('/')
if 'preserve' in options:
conflictopt = 'P'
elif 'exception' in options:
conflictopt = 'E'
else:
conflictopt = 'O'
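# Note (added for clarity): conflictopt is handed to the spatial write calls below;
# judging by the option names, 'P' preserves existing labels, 'E' records overlapping
# voxels as exceptions, and 'O' (the default) overwrites them.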
try:
if len(h5f.keys()) == 0:
logger.error("Error. Failed to parse HDF5 file because it was empty.")
raise NDWSError("Error. Failed to parse HDF5 file becuase it was empty.")
for k in h5f.keys():
idgrp = h5f.get(k)
# Convert HDF5 to annotation
anno = h5ann.H5toAnnotation(k, idgrp, db, ch)
# set the identifier (separate transaction)
if not ('update' in options or 'dataonly' in options or 'reduce' in options):
anno.setField('annid',(rdb.assignID(ch,anno.annid)))
tries = 0
done = False
while not done and tries < 5:
try:
if anno.__class__ in [AnnNeuron, AnnSeed] and ( idgrp.get('VOXELS') or idgrp.get('CUTOUT')):
logger.warning ("Cannot write to annotation type {}".format(anno.__class__))
raise NDWSError ("Cannot write to annotation type {}".format(anno.__class__))
if 'update' in options and 'dataonly' in options:
logger.warning ("Illegal combination of options. Cannot use udpate and dataonly together")
raise NDWSError ("Illegal combination of options. Cannot use udpate and dataonly together")
elif not 'dataonly' in options and not 'reduce' in options:
# Put into the database
rdb.putAnnotation(ch, anno, options)
# data portion of the put
#RBTODO get timestamp from HDF5
timestamp=0
# Get the resolution if it's specified
if 'RESOLUTION' in idgrp:
resolution = int(idgrp.get('RESOLUTION')[0])
# Load the data associated with this annotation
# Is it voxel data?
if 'VOXELS' in idgrp:
voxels = np.array(idgrp.get('VOXELS'),dtype=np.uint32)
voxels = voxels - proj.datasetcfg.offset[resolution]
else:
voxels = None
if voxels is not None and 'reduce' not in options:
if 'preserve' in options:
conflictopt = 'P'
elif 'exception' in options:
conflictopt = 'E'
else:
conflictopt = 'O'
# Check that the voxels have a conforming size:
if voxels.shape[1] != 3:
logger.warning ("Voxels data not the right shape. Must be (:,3). Shape is %s" % str(voxels.shape))
raise NDWSError ("Voxels data not the right shape. Must be (:,3). Shape is %s" % str(voxels.shape))
exceptions = db.annotate ( ch, anno.annid, timestamp, resolution, voxels, conflictopt )
# Otherwise this is a shave operation
elif voxels is not None and 'reduce' in options:
# Check that the voxels have a conforming size:
if voxels.shape[1] != 3:
logger.warning ("Voxels data not the right shape. Must be (:,3). Shape is %s" % str(voxels.shape))
raise NDWSError ("Voxels data not the right shape. Must be (:,3). Shape is %s" % str(voxels.shape))
db.shave ( ch, anno.annid, timestamp, resolution, voxels )
# Is it dense data?
if 'CUTOUT' in idgrp:
cutout = np.array(idgrp.get('CUTOUT'),dtype=np.uint32)
else:
cutout = None
if 'XYZOFFSET' in idgrp:
h5xyzoffset = idgrp.get('XYZOFFSET')
else:
h5xyzoffset = None
if cutout is not None and h5xyzoffset is not None and 'reduce' not in options:
# the zstart in datasetcfg is sometimes offset to make it aligned.
# Removing the offset and aligning data to zero regardless of where it
# starts is probably the best idea. For now, subtract the offset here.
offset = proj.datasetcfg.offset[resolution]
corner = map(sub, h5xyzoffset, offset)
db.annotateEntityDense ( ch, anno.annid, timestamp, corner, resolution, np.array(cutout), conflictopt )
elif cutout is not None and h5xyzoffset is not None and 'reduce' in options:
offset = proj.datasetcfg.offset[resolution]
corner = map(sub, h5xyzoffset,offset)
db.shaveEntityDense ( ch, anno.annid, timestamp, corner, resolution, np.array(cutout))
elif cutout is not None or h5xyzoffset is not None:
#TODO this is a loggable error
pass
# Is it dense data?
if 'CUBOIDS' in idgrp:
cuboids = h5ann.H5getCuboids(idgrp)
for (corner, cuboiddata) in cuboids:
db.annotateEntityDense ( ch, anno.annid, timestamp, corner, resolution, cuboiddata, conflictopt )
# only add the identifier if you commit
if not 'dataonly' in options and not 'reduce' in options:
retvals.append(anno.annid)
# Here with no error is successful
done = True
# rollback if you catch an error
except MySQLdb.OperationalError, e:
logger.warning("Put Anntotation: Transaction did not complete. {}".format(e))
tries += 1
continue
except MySQLdb.Error, e:
logger.error("Put Annotation: Put transaction rollback. {}".format(e))
raise NDWSError("Put Annotation: Put transaction rollback. {}".format(e))
except Exception, e:
logger.exception("Put Annotation:Put transaction rollback. {}".format(e))
raise NDWSError("Put Annotation:Put transaction rollback. {}".format(e))
finally:
h5f.close()
retstr = ','.join(map(str, retvals))
# return the identifier
return retstr
def getNIFTI ( webargs ):
"""Return the entire channel as a NIFTI file.
Limited to 2Gig"""
[token, channel, optionsargs] = webargs.split('/',2)
with closing (NDProjectsDB()) as projdb:
proj = projdb.loadToken ( token )
with closing (SpatialDB(proj)) as db:
ch = NDChannel.fromName(proj, channel)
# Make a named temporary file for the nii file
with closing(tempfile.NamedTemporaryFile(suffix='.nii.gz')) as tmpfile:
queryNIFTI ( tmpfile, ch, db, proj )
tmpfile.seek(0)
return tmpfile.read()
def putNIFTI ( webargs, postdata ):
"""Put a NIFTI object as an image"""
[token, channel, optionsargs] = webargs.split('/',2)
proj = NDProject.fromTokenName(token)
with closing (SpatialDB(proj)) as db:
# get ready to create a channel
if "create" in optionsargs:
ch = None
createflag = True
else:
createflag = False
ch = NDChannel.fromName(proj, channel)
# Don't write to readonly channels
if ch.readonly == READONLY_TRUE:
logger.error("Attempt to write to read only channel {} in project. Web Args:{}".format(ch.getChannelName(), proj.project_name, webargs))
raise NDWSError("Attempt to write to read only channel {} in project. Web Args: {}".format(ch.getChannelName(), proj.project_name, webargs))
if "annotations" in optionsargs:
annotationsflag=True
else:
annotationsflag=False
# check the magic number -- is it a gz file?
if postdata[0] == '\x1f' and postdata[1] == '\x8b':
# Make a named temporary file
with closing (tempfile.NamedTemporaryFile(suffix='.nii.gz')) as tmpfile:
tmpfile.write ( postdata )
tmpfile.seek(0)
# ingest the nifti file
ingestNIFTI ( tmpfile.name, ch, db, proj, channel_name = channel, create=createflag, annotations=annotationsflag )
else:
# Make a named temporary file
with closing (tempfile.NamedTemporaryFile(suffix='.nii')) as tmpfile:
tmpfile.write ( postdata )
tmpfile.seek(0)
# ingest the nifti file
ingestNIFTI ( tmpfile.name, ch, db, proj, channel_name = channel, create=createflag, annotations=annotationsflag )
# def getSWC ( webargs ):
# """Return an SWC object generated from Skeletons/Nodes"""
# [token, channel, service, rest] = webargs.split('/',3)
# proj = projdb.fromTokenName(token)
# ch = NDChannel.fromName(proj, channel)
# with closing (RamonDB(proj)) as db:
# # Make a named temporary file for the SWC
# with closing (tempfile.NamedTemporaryFile()) as tmpfile:
# # if skeleton ids are specified, use those
# if rest:
# skelids = map ( int, rest.rstrip('/').split(',') )
# # otherwise get all skeletons
# else:
# skelids=db.getKVQuery(ch, 'ann_type', ANNO_SKELETON)
# ndwsskel.querySWC ( tmpfile, ch, db, proj, skelids )
# tmpfile.seek(0)
# return tmpfile.read()
# def putSWC ( webargs, postdata ):
# """Put an SWC object into RAMON skeleton/tree nodes"""
# [token, channel, service, optionsargs] = webargs.split('/',3)
# proj = projdb.fromTokenName(token)
# ch = NDChannel.fromName(proj, channel)
# with closing (RamonDB(proj)) as rdb:
# # Don't write to readonly channels
# if ch.readonly == READONLY_TRUE:
# logger.error("Attempt to write to read only channel {} in project. Web Args:{}".format(ch.getChannelName(), proj.project_name, webargs))
# raise NDWSError("Attempt to write to read only channel {} in project. Web Args: {}".format(ch.getChannelName(), proj.project_name, webargs))
# # Make a named temporary file for the HDF5
# with closing (tempfile.NamedTemporaryFile()) as tmpfile:
# tmpfile.write ( postdata )
# tmpfile.seek(0)
# # Parse the swc file into skeletons
# swc_skels = ndwsskel.ingestSWC ( tmpfile, ch, rdb )
# return swc_skels
def queryAnnoObjects ( webargs, postdata=None ):
"""Return a list of anno ids restricted by equality predicates. Equalities are alternating in field/value in the url."""
try:
m = re.search("(\w+)/(\w+)/query/(.*)/?$", webargs)
[token, channel, restargs] = [i for i in m.groups()]
except Exception, e:
logger.error("Wrong arguments {}. {}".format(webargs, e))
raise NDWSError("Wrong arguments {}. {}".format(webargs, e))
with closing (NDProjectsDB()) as projdb:
proj = projdb.loadToken ( token )
with closing (SpatialDB(proj)) as db:
with closing (RamonDB(proj)) as rdb:
ch = NDChannel.fromName(proj,channel)
annoids = rdb.getAnnoObjects(ch, restargs.split('/'))
# We have a cutout as well
if postdata:
# RB TODO this is a brute force implementation. This probably needs to be optimized to use several different execution strategies based on the cutout size and the number of objects.
# Make a named temporary file for the HDF5
with closing (tempfile.NamedTemporaryFile()) as tmpfile:
tmpfile.write ( postdata )
tmpfile.seek(0)
h5f = h5py.File ( tmpfile.name, driver='core', backing_store=False )
try:
resolution = h5f['RESOLUTION'][0]
offset = proj.datasetcfg.offset[resolution]
corner = map(sub, h5f['XYZOFFSET'], offset)
dim = h5f['CUTOUTSIZE'][:]
if not proj.datasetcfg.checkCube(resolution, corner, dim):
logger.error("Illegal cutout corner={}, dim={}".format(corner, dim))
raise NDWSError("Illegal cutout corner={}, dim={}".format( corner, dim))
cutout = db.cutout(ch, corner, dim, resolution)
# KL TODO On same lines as filter. Not yet complete. Called annoidIntersect()
# Check if the cutout has any non-zero values
if cutout.isNotZeros():
annoids = np.intersect1d(annoids, np.unique(cutout.data))
else:
annoids = np.asarray([], dtype=np.uint32)
finally:
h5f.close()
return h5ann.PackageIDs(annoids)
def deleteAnnotation ( webargs ):
"""Delete a RAMON object"""
[ token, channel, otherargs ] = webargs.split ('/',2)
# pattern for using contexts to close databases get the project
with closing (NDProjectsDB()) as projdb:
proj = projdb.loadToken ( token )
# and the database and then call the db function
with closing (SpatialDB(proj)) as db:
with closing (RamonDB(proj)) as rdb:
ch = NDChannel.fromName(proj, channel)
# Don't write to readonly channels
if ch.readonly == READONLY_TRUE:
logger.error("Attempt to write to read only channel {} in project. Web Args:{}".format(ch.getChannelName(), proj.project_name, webargs))
raise NDWSError("Attempt to write to read only channel {} in project. Web Args: {}".format(ch.getChannelName(), proj.project_name, webargs))
# Split the URL and get the args
args = otherargs.split('/', 2)
# if the first argument is numeric, it is an annoid
if re.match ( '^[\d,]+$', args[0] ):
annoids = map(np.uint32, args[0].split(','))
# if not..this is not a well-formed delete request
else:
logger.error ("Delete did not specify a legal object identifier = %s" % args[0] )
raise NDWSError ("Delete did not specify a legal object identifier = %s" % args[0] )
for annoid in annoids:
tries = 0
done = False
while not done and tries < 5:
try:
db.deleteAnnoData ( ch, annoid )
rdb.deleteAnnotation ( ch, annoid )
done = True
# rollback if you catch an error
except MySQLdb.OperationalError, e:
logger.warning("Transaction did not complete. {}".format(e))
tries += 1
continue
except MySQLdb.Error, e:
logger.error("Put transaction rollback. {}".format(e))
raise NDWSError("Put transaction rollback. {}".format(e))
except Exception, e:
logger.exception("Put transaction rollback. {}".format(e))
raise NDWSError("Put transaction rollback. {}".format(e))
def jsonInfo ( webargs ):
"""Return project information in json format"""
try:
# format /token/info/
m = re.match(r'(\w+)/info/', webargs)
token = m.group(1)
except Exception, e:
logger.error("Bad URL {}".format(webargs))
raise NDWSError("Bad URL {}".format(webargs))
# get the project
proj = NDProject.fromTokenName(token)
return jsonprojinfo.jsonInfo(proj)
def xmlInfo ( webargs ):
"""Return project information in json format"""
try:
# match the format /token/volume.vikingxml
m = re.match(r'(\w+)/volume.vikingxml', webargs)
token = m.group(1)
except Exception, e:
logger.error("Bad URL {}".format(webargs))
raise NDWSError("Bad URL {}".format(webargs))
# get the project
proj = NDProject.fromTokenName(token)
return jsonprojinfo.xmlInfo(token, proj)
def projInfo ( webargs ):
[ token, projinfoliteral, rest ] = webargs.split ('/',2)
# get the project
with closing (NDProjectsDB()) as projdb:
proj = projdb.loadToken ( token )
# Create an in-memory HDF5 file
tmpfile = tempfile.NamedTemporaryFile ()
h5f = h5py.File ( tmpfile.name )
try:
# Populate the file with project information
h5projinfo.h5Info(proj, h5f)
finally:
h5f.close()
tmpfile.seek(0)
return tmpfile.read()
def chanInfo ( webargs ):
"""Return information about the project's channels"""
[ token, projinfoliteral, otherargs ] = webargs.split ('/',2)
# pattern for using contexts to close databases
# get the project
with closing (NDProjectsDB()) as projdb:
proj = projdb.loadToken ( token )
# and the database and then call the db function
with closing (SpatialDB(proj)) as db:
return jsonprojinfo.jsonChanInfo( proj, db )
def reserve ( webargs ):
"""Reserve annotation ids"""
[token, channel, reservestr, cnt, other] = webargs.split ('/', 4)
with closing (NDProjectsDB()) as projdb:
proj = projdb.loadToken ( token )
with closing (RamonDB(proj)) as rdb:
ch = NDChannel.fromName(proj,channel)
if ch.channel_type not in ANNOTATION_CHANNELS:
logger.error("Illegal project type for reserve.")
raise NDWSError("Illegal project type for reserve.")
try:
count = int(cnt)
# perform the reservation
firstid = rdb.reserve (ch, count)
return json.dumps ( (firstid, int(cnt)) )
except:
logger.error("Illegal arguments to reserve: {}".format(webargs))
raise NDWSError("Illegal arguments to reserve: {}".format(webargs))
def getField ( webargs ):
"""Return a single HDF5 field"""
try:
m = re.match("(\w+)/(\w+)/getField/(\d+)/(\w+)/$", webargs)
[token, channel, annid, field] = [i for i in m.groups()]
except:
logger.error("Illegal getField request. Wrong number of arguments.")
raise NDWSError("Illegal getField request. Wrong number of arguments.")
with closing (NDProjectsDB()) as projdb:
proj = projdb.loadToken ( token )
with closing (RamonDB(proj)) as rdb:
ch = NDChannel.fromName(proj, channel)
anno = rdb.getAnnotation(ch, annid)
if anno is None:
logger.error("No annotation found at identifier = {}".format(annid))
raise NDWSError ("No annotation found at identifier = {}".format(annid))
return anno.getField(field)
def setField ( webargs ):
"""Assign a single HDF5 field"""
try:
m = re.match("(\w+)/(\w+)/setField/(\d+)/(\w+)/(\w+|[\d+,.]+)/$", webargs)
[token, channel, annid, field, value] = [i for i in m.groups()]
except:
logger.error("Illegal setField request. Wrong number of arguments. Web Args: {}".format(webargs))
raise NDWSError("Illegal setField request. Wrong number of arguments. Web Args:{}".format(webargs))
with closing (NDProjectsDB()) as projdb:
proj = projdb.loadToken ( token )
with closing (RamonDB(proj)) as rdb:
ch = NDChannel.fromName(proj, channel)
# Don't write to readonly channels
if ch.readonly == READONLY_TRUE:
logger.error("Attempt to write to read only channel {} in project. Web Args:{}".format(ch.getChannelName(), proj.project_name, webargs))
raise NDWSError("Attempt to write to read only channel {} in project. Web Args: {}".format(ch.getChannelName(), proj.project_name, webargs))
rdb.updateAnnotation(ch, annid, field, value)
def getPropagate (webargs):
""" Return the value of the Propagate field """
# input in the format token/channel_list/getPropagate/
try:
(token, channel_list) = re.match("(\w+)/([\w+,]+)/getPropagate/$", webargs).groups()
except Exception, e:
logger.error("Illegal getPropagate request. Wrong format {}. {}".format(webargs,e))
raise NDWSError("Illegal getPropagate request. Wrong format {}. {}".format(webargs, e))
# pattern for using contexts to close databases
proj = NDProject.fromTokenName(token)
value_list = []
for channel_name in channel_list.split(','):
ch = proj.getChannelObj(channel_name)
value_list.append(ch.propagate)
return ','.join(str(i) for i in value_list)
def setPropagate(webargs):
"""Set the value of the propagate field"""
# input in the format token/channel_list/setPropagate/value/
# here value = {NOT_PROPAGATED, UNDER_PROPAGATION} not {PROPAGATED}
try:
(token, channel_list, value_list) = re.match("(\w+)/([\w+,]+)/setPropagate/([\d+,]+)/$", webargs).groups()
except Exception as e:
logger.error("Illegal setPropagate request. Wrong format {}. {}".format(webargs, e))
raise NDWSError("Illegal setPropagate request. Wrong format {}. {}".format(webargs, e))
# pattern for using contexts to close databases. get the project
proj = NDProject.fromTokenName(token)
for channel_name in channel_list.split(','):
ch = proj.getChannelObj(channel_name)
value = value_list.split(',')[0]
# If the value is to be set under propagation and the project is not under propagation
if int(value) == UNDER_PROPAGATION and ch.propagate == NOT_PROPAGATED:
# and is not read only
if ch.readonly == READONLY_FALSE:
ch.propagate = UNDER_PROPAGATION
from sd.tasks import propagate
# then call propagate
# propagate(token, channel_name)
propagate.delay(token, channel_name)
else:
logger.error("Cannot Propagate this project. It is set to Read Only.")
raise NDWSError("Cannot Propagate this project. It is set to Read Only.")
# if the project is Propagated already you can set it to under propagation
elif int(value) == UNDER_PROPAGATION and ch.propagate == PROPAGATED:
logger.error("Cannot propagate a project which is propagated. Set to Not Propagated first.")
raise NDWSError("Cannot propagate a project which is propagated. Set to Not Propagated first.")
# If the value to be set is not propagated
elif int(value) == NOT_PROPAGATED:
# and the project is under propagation then throw an error
if ch.propagate == UNDER_PROPAGATION:
logger.error("Cannot set this value. Project is under propagation.")
raise NDWSError("Cannot set this value. Project is under propagation.")
# and the project is already propagated and set read only then throw error
elif ch.propagate == PROPAGATED and ch.readonly == READONLY_TRUE:
logger.error("Cannot set this Project to unpropagated. Project is Read only")
raise NDWSError("Cannot set this Project to unpropagated. Project is Read only")
else:
ch.propagate = NOT_PROPAGATED
# cannot set a project to propagated via the RESTful interface
else:
logger.error("Invalid Value {} for setPropagate".format(value))
raise NDWSError("Invalid Value {} for setPropagate".format(value))
def merge (webargs):
"""Return a single HDF5 field"""
# accepting the format token/channel_name/merge/listofids/[global]
try:
m = re.match("(\w+)/(\w+)/merge/([\d+,]+)/(\w+/\d+|/d+)/$", webargs)
#m = re.match("(\w+)/(\w+)/merge/([\d+,]+)/([\w+,/]+)/$", webargs)
[token, channel_name, relabel_ids, rest_args] = [i for i in m.groups()]
except:
logger.error("Illegal globalMerge request. Wrong number of arguments.")
raise NDWSError("Illegal globalMerber request. Wrong number of arguments.")
# get the ids from the list of ids and store them in a list variable
ids = relabel_ids.split(',')
last_id = len(ids)-1
ids[last_id] = ids[last_id].replace("/","")
# Make ids a numpy array to speed up vectorized operations
ids = np.array(ids, dtype=np.uint32)
# Validate ids. If ids do not exist raise errors
# pattern for using contexts to close databases, get the project
with closing (NDProjectsDB()) as projdb:
proj = projdb.loadToken ( token )
# and the database and then call the db function
with closing (RamonDB(proj)) as db:
ch = proj.getChannelObj(channel_name)
# Check that all ids in the id strings are valid annotation objects
for curid in ids:
obj = db.getAnnotation(ch, curid)
if obj is None:
logger.error("Invalid object id {} used in merge".format(curid))
raise NDWSError("Invalid object id used in merge")
m = re.match("global/(\d+)", rest_args)
if m is not None:
resolution= int(m.group(1))
return db.mergeGlobal(ch, ids, 'global', int(resolution))
elif re.match("global/", rest_args) is not None:
resolution = proj.resolution
return db.mergeGlobal(ch, ids, 'global', int(resolution))
else:
# PYTODO illegal merge (no support if not global)
assert 0
def publicDatasets ( self ):
"""Return a JSON formatted list of public datasets"""
with closing (NDProjectsDB()) as projdb:
return jsonprojinfo.publicDatasets ( projdb )
def publicTokens ( self ):
"""Return a json formatted list of public tokens"""
with closing (NDProjectsDB()) as projdb:
return jsonprojinfo.publicTokens ( projdb )
def exceptions ( webargs ):
"""list of multiply defined voxels in a cutout"""
[token, exceptliteral, cutoutargs] = webargs.split ('/',2)
# pattern for using contexts to close databases
# get the project
with closing (NDProjectsDB()) as projdb:
proj = projdb.loadToken ( token )
# and the database and then call the db function
with closing (SpatialDB(proj)) as db:
# Perform argument processing
try:
args = restargs.BrainRestArgs ();
args.cutoutArgs ( cutoutargs, proj.datasetcfg )
except restargs.RESTArgsError, e:
logger.error("REST Arguments {} failed: {}".format(webargs,e))
raise NDWSError(e)
# Extract the relevant values
corner = args.getCorner()
dim = args.getDim()
resolution = args.getResolution()
# check to make sure it's an annotation project
if proj.channel_type not in ANNOTATION_PROJECTS :
logger.error("Asked for exceptions on project that is not of type ANNOTATIONS")
raise NDWSError("Asked for exceptions on project that is not of type ANNOTATIONS")
elif not proj.getExceptions():
logger.error("Asked for exceptions on project without exceptions")
raise NDWSError("Asked for exceptions on project without exceptions")
# Get the exceptions -- expect a rect np.array of shape x,y,z,id1,id2,...,idn where n is the longest exception list
exceptions = db.exceptionsCutout ( corner, dim, resolution )
# package as an HDF5 file
tmpfile = tempfile.NamedTemporaryFile()
fh5out = h5py.File ( tmpfile.name )
try:
# empty HDF5 file if exceptions = None
if exceptions is None:
ds = fh5out.create_dataset ( "exceptions", (3,), np.uint8 )
else:
ds = fh5out.create_dataset ( "exceptions", tuple(exceptions.shape), exceptions.dtype, compression='gzip', data=exceptions )
except:
fh5out.close()
raise
fh5out.close()
tmpfile.seek(0)
return tmpfile.read()
def minmaxProject ( webargs ):
"""Return a minimum or maximum projection across a volume by a specified plane"""
[ token, chanstr, minormax, plane, cutoutargs ] = webargs.split ('/', 4)
# split the channel string
channels = chanstr.split(",")
# check for one channel only
if len (channels) != 1:
raise NDWSError("min or max project processes one channel at a time.")
# pattern for using contexts to close databases
# get the project
with closing (NDProjectsDB()) as projdb:
proj = projdb.loadToken ( token )
# and the database and then call the db function
with closing (SpatialDB(proj)) as db:
# maxproject data
mpdata = None
channel_name = channels[0]
ch = NDChannel.fromName(proj,channel_name)
cb = cutout (cutoutargs, ch, proj, db)
FilterCube (cutoutargs, cb)
# project onto the image plane
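# Note (added for clarity): the cube data is assumed to be indexed [z, y, x], so an
# 'xy' projection collapses axis 0 (z), 'xz' collapses axis 1 (y), and 'yz' collapses
# axis 2 (x), matching the amax/amin axes used below.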
if plane == 'xy':
# take the min project or maxproject
if minormax == 'maxproj':
cbplane = np.amax (cb.data, axis=0)
elif minormax == 'minproj':
cbplane = np.amin (cb.data, axis=0)
else:
logger.error("Illegal projection requested. Projection = {}", minormax)
raise NDWSError("Illegal image plane requested. Projections = {}", minormax)
elif plane == 'xz':
# take the min project or maxproject
if minormax == 'maxproj':
cbplane = np.amax (cb.data, axis=1)
elif minormax == 'minproj':
cbplane = np.amin (cb.data, axis=1)
else:
logger.error("Illegal projection requested. Projection = {}", minormax)
raise NDWSError("Illegal image plane requested. Projections = {}", minormax)
elif plane == 'yz':
# take the min project or maxproject
if minormax == 'maxproj':
cbplane = np.amax (cb.data, axis=2)
elif minormax == 'minproj':
cbplane = np.amin (cb.data, axis=2)
else:
logger.error("Illegal projection requested. Projection = {}", minormax)
raise NDWSError("Illegal image plane requested. Projections = {}", minormax)
# manage the color space
mpdata = window(cbplane, ch)
img = Image.frombuffer ( 'L', (mpdata.shape[1],mpdata.shape[0]), mpdata.flatten(), 'raw', 'L', 0, 1 )
fileobj = cStringIO.StringIO ( )
img.save ( fileobj, "PNG" )
fileobj.seek(0)
return fileobj.read()
def mcFalseColor ( webargs ):
"""False color image of multiple channels"""
[ token, chanstr, mcfcstr, service, cutoutargs ] = webargs.split ('/', 4)
# split the channel string
channels = chanstr.split(",")
# pattern for using contexts to close databases
# get the project
proj = NDProject.fromTokenName(token)
# and the database and then call the db function
with closing (SpatialDB(proj)) as db:
mcdata = None
for i in range(len(channels)):
# skip 0 channels
if channels[i]=='0':
continue
imageargs = '{}/{}/{}'.format(channels[i],service,cutoutargs)
cb = imgSlice (imageargs, proj, db)
if mcdata is None:
if service == 'xy':
mcdata = np.zeros((len(channels),cb.data.shape[1],cb.data.shape[2]), dtype=cb.data.dtype)
elif service == 'xz':
mcdata = np.zeros((len(channels),cb.data.shape[0],cb.data.shape[2]), dtype=cb.data.dtype)
elif service == 'yz':
mcdata = np.zeros((len(channels),cb.data.shape[0],cb.data.shape[1]), dtype=cb.data.dtype)
else:
logger.error( "No such service {}. Arguments {}".format(service, webargs))
raise NDWSError( "No such service {}. Arguments {}".format(service, webargs))
mcdata[i:] = cb.data
# We have a compound array. Now color it.
colors = ('C','M','Y','R','G','B')
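# Note (added for clarity): the palette letters presumably stand for cyan, magenta,
# yellow, red, green and blue; mcfc.mcfcPNG is expected to assign one color per
# channel when compositing the false-color image.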
img = mcfc.mcfcPNG ( mcdata, colors, 2.0 )
fileobj = cStringIO.StringIO ( )
img.save ( fileobj, "PNG" )
fileobj.seek(0)
return fileobj.read()
|
neurodata/ndstore
|
webservices/ndwsrest.py
|
Python
|
apache-2.0
| 83,674
|
[
"NEURON"
] |
17830706453c43f9b94c81c54ec13b8a19172d657f99501c2b83329bb0391926
|
#!/usr/bin/env python
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### __ ________ _____ _____
### \ \ / / ____|/ ____| __ \ /\
### \ \ / /| |__ | (___ | |__) / \
### \ \/ / | __| \___ \| ___/ /\ \
### \ / | |____ ____) | | / ____ \
### \/ |______|_____/|_| /_/ \_\
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Authors: Andrew E. Webb, Thomas A. Walsh & Mary J. O'Connell
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
def help_message(help_requested):
global bme_command_table
if help_requested in bme_command_table:
third_party = False
help_separator = '----------------------------------------------------------------------------------'
command_str = 'Command: {0}'.format(help_requested)
command_text = '|||||{0}|||||'.format(command_str.center(72, ' '))
print '\n{0}\n{1}\n{2}'.format(help_separator, command_text, help_separator)
if help_requested == 'clean':
print '''Details: QC filter for downloaded nucleotide sequences and/or genomes.
Basic usage: vespa.py clean -input=USR_INPUT
Supported file format(s): -input option: fasta formatted files'''
elif help_requested == 'ensembl_clean':
print'''Details: QC filter for identifying the longest nucleotide (canonical) transcript
within an Ensembl nucleotide genome.
Basic usage: vespa.py ensembl_clean -input=USR_INPUT
Supported file format(s): -input option: fasta formatted files'''
elif help_requested == 'translate':
print'''Details: Translates nucleotide sequences that passed the QC filter of either clean
function into amino acid sequences
Basic usage: vespa.py translate -input=USR_INPUT
Supported file format(s): -input option: fasta formatted files'''
elif help_requested == 'create_database':
print'''Details: Concatenates multiple genomes into a single database file.
Basic usage: vespa.py create_database -input=USR_INPUT
Supported file format(s): -input option: fasta formatted files'''
elif help_requested == 'gene_selection':
print '''Details: Searches a sequence database for gene identifiers specified within a
separate csv file.
Basic usage: vespa.py gene_selection -input=USR_INPUT -selection_csv=USR_INPUT
Supported file format(s): -input option: fasta formatted files,
-selection_csv option: csv formatted files'''
elif help_requested == 'similarity_groups':
print '''Details: Construct sequence similarity groups with both non-reciprocal and
reciprocal connections.
Basic usage: vespa.py similarity_groups -input=USR_INPUT -format=blast -database=USR_DB
Supported file format(s): -input option: BLAST tabular and HMMER output files
-database option: fasta formatted files
-format option: blast or hmmer'''
elif help_requested == 'reciprocal_groups':
print '''Details: Construct sequence similarity groups with only reciprocal connections.
Basic usage: vespa.py reciprocal_groups -input=USR_INPUT -format=blast -database=USR_DB
Supported file format(s): -input option: BLAST tabular and HMMER output files
-database option: fasta formatted files
-format option: blast or hmmer'''
elif help_requested == 'best_reciprocal_groups':
print '''Details: Construct sequence similarity groups with only reciprocal connections
that share the best E-value for each species.
Basic usage: vespa.py best_reciprocal_groups -input=USR_INPUT -format=blast -database=USR_DB
Supported file format(s): -input option: BLAST tabular and HMMER output files
-database option: fasta formatted files
-format option: blast or hmmer'''
elif help_requested == 'metal_compare':
third_party = True
print '''Details: Automates MSA comparison, scoring, and selection using the third-party
programs MetAl and noRMD.
Basic usage: vespa.py metal_compare -input=USR_INPUT -compare=USR_INPUT
Supported file format(s): -input and -compare options: fasta formatted files'''
elif help_requested == 'prottest_setup':
third_party = True
print '''Details: Automates the identification of the best-fit model of amino acid replacement
for protein MSAs using the third-party program ProtTest3.
Basic usage: vespa.py prottest_setup -input=USR_INPUT
Supported file format(s): -input option: fasta formatted files'''
elif help_requested == 'prottest_reader':
third_party = True
print '''Details: Automates the process of reading the output of the third-party program
ProtTest3.
Basic usage: vespa.py prottest_reader -input=USR_INPUT
Supported file format(s): -input option: ProtTest3 output format'''
elif help_requested == 'mrbayes_setup':
third_party = True
print '''Details: Simplifies phylogenetic reconstruction using the third-party program MrBayes
by creating NEXUS formatted files with MrBayes command blocks.
Basic usage: vespa.py mrbayes_setup -input=USR_INPUT -model_list=MODEL_DATA
Supported file format(s): -input option: fasta formatted files
-model_list option: prottest_reader output files'''
elif help_requested == 'map_alignments':
print '''Details: Automates the conversion of protein MSAs to nucleotide (codon) MSAs required
for codeML.
Basic usage: vespa.py map_alignments -input=USR_INPUT -database=USR_DB
Supported file format(s): -input and -database options: fasta formatted files'''
elif help_requested == 'infer_genetree':
print '''Details: Automates the creation of the corresponding gene tree for a MSA using a
user-specified species tree.
Basic usage: vespa.py infer_genetree -input=USR_INPUT -species_tree=USR_INPUT
Supported file format(s): -input option: fasta formatted files
-species_tree option: newick formatted files'''
elif help_requested == 'codeml_setup':
print '''Details: Automates the creation of the complex codeML directory structure.
Basic usage: vespa.py codeml_setup -input=USR_INPUT
Supported file format(s): -input option: fasta formatted files with corresponding
newick formatted files'''
elif help_requested == 'mrbayes_reader':
print '''Details: Automates the conversion of nexus-formatted phylogenies into the
newick format.
Basic usage: vespa.py mrbayes_reader -input=USR_INPUT
Supported file format(s): -input option: MrBayes converged NEXUS output files'''
elif help_requested == 'create_subtrees':
print '''Details: Simplifies pruning large multigene phylogenies into smaller
sub-phylogenies.
Basic usage: vespa.py create_subtrees -input=USR_INPUT
Supported file format(s): -input option: newick formatted files'''
elif help_requested == 'create_branch':
print '''Details: Simplifies the creation of the branch-label table required for
the branch-site models of codeML.
Basic usage: vespa.py create_branch -input=USR_INPUT
Supported file format(s): -input option: newick formatted files'''
elif help_requested == 'codeml_reader':
print '''Details: Parses the complex codeML directory structure and creates
simplified results.
Basic usage: vespa.py codeml_reader -input=USR_INPUT
Supported file format(s): -input option: VESPA formatted codeML output files'''
if third_party:
print '\nSee program manual for third-party program citations and additional options\n'
else:
print '\nSee program manual for additional options\n'
elif help_requested:
print 'Command not found, please confirm commands in help file.\n'
else:
print '''VESPA v1.0b - [La]rge-scale [M]olecular evolution and selective pressure [P]ipeline
Authors: Andrew E. Webb, Thomas A. Walsh & Mary J. O'Connell
__ ________ _____ _____
\ \ / / ____|/ ____| __ \ /\
\ \ / /| |__ | (___ | |__) / \
\ \/ / | __| \___ \| ___/ /\ \
\ / | |____ ____) | | / ____ \
\/ |______|_____/|_| /_/ \_\
----------------------------------------------------------------------------------
||||| Command Help ||||||
----------------------------------------------------------------------------------
Specify the command of interest after invoking help/h.
For example: vespa.py help clean
----------------------------------------------------------------------------------
----------------------------------------------------------------------------------
||||| Phase 1: Data Preparation |||||
----------------------------------------------------------------------------------
This phase is included for users new to bioinformatics. It prepares downloaded
genomes for homology searching using the two VESPA supported homology search tools:
BLAST and HMMER.
Commands: clean, ensembl_clean, translate, create_database, gene_selection
----------------------------------------------------------------------------------
----------------------------------------------------------------------------------
||||| Phase 2: Homology Searching |||||
----------------------------------------------------------------------------------
Details: This phase is concerned with identifying groups of similar sequences from
either BLAST or HMMER homology searches.
Commands: similarity_groups, reciprocal_groups, best_reciprocal_groups
----------------------------------------------------------------------------------
----------------------------------------------------------------------------------
||||| Phase 3: Alignment Assessment & Phylogeny Reconstruction |||||
----------------------------------------------------------------------------------
Details: This phase combines multiple third-party programs to automate the
assessment, selection, and phylogenetic reconstruction of protein MSAs.
Commands: metal_compare, prottest_setup, prottest_reader, mrbayes_setup
----------------------------------------------------------------------------------
----------------------------------------------------------------------------------
||||| Phase 4: Selection Analysis Preparation |||||
----------------------------------------------------------------------------------
Details: This phase automates large-scale selective pressure analysis using codeML
from the PAML package.
Commands: map_alignments, infer_genetree, mrbayes_reader, link_input, codeml_setup,
create_subtrees, create_branch
----------------------------------------------------------------------------------
----------------------------------------------------------------------------------
||||| Phase 5: Selection Analysis Assessment |||||
----------------------------------------------------------------------------------
Details: This phase automatically parses the codeML directory structure
and creates simplified summary files.
Command: codeml_reader
----------------------------------------------------------------------------------
See program manual for additional information.
'''
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### _____ _
### / ____| |
### | | | | __ _ ___ ___ ___ ___
### | | | |/ _` / __/ __|/ _ \/ __|
### | |____| | (_| \__ \__ \ __/\__ \
### \_____|_|\__,_|___/___/\___||___/
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### class: sequence_data - defines base sequence information and has basic functions ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
class sequence_data(object):
def __init__(self, header, sequence):
self.header = header
self.sequence = sequence.upper()
self.filename = header
self.length = len(''.join(sequence))
self.type = ''
def __len__(self):
return len(''.join(self.sequence))
def __str__(self):
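# Renders the record FASTA-style: the stored header line followed by the
# sequence wrapped at 60 characters per line.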
prtSeq = ''.join(self.header) + '\n'.join([self.sequence[i:i+60] for i in range(0, len(self.sequence), 60)])
return prtSeq
def seq_filename (self, definement):
self.filename = str(definement)
def seq_translate (self):
codonTbl = {'ATT':'I','ATC':'I','ATA':'I','CTT':'L',
'CTC':'L','CTA':'L','CTG':'L','TTA':'L',
'TTG':'L','GTT':'V','GTC':'V','GTA':'V',
'GTG':'V','TTT':'F','TTC':'F','ATG':'M',
'TGT':'C','TGC':'C','GCT':'A','GCC':'A',
'GCA':'A','GCG':'A','GGT':'G','GGC':'G',
'GGA':'G','GGG':'G','CCT':'P','CCC':'P',
'CCA':'P','CCG':'P','ACT':'T','ACC':'T',
'ACA':'T','ACG':'T','TCT':'S','TCC':'S',
'TCA':'S','TCG':'S','AGT':'S','AGC':'S',
'TAT':'Y','TAC':'Y','TGG':'W','CAA':'Q',
'CAG':'Q','AAT':'N','AAC':'N','CAT':'H',
'CAC':'H','GAA':'E','GAG':'E','GAT':'D',
'GAC':'D','AAA':'K','AAG':'K','CGT':'R',
'CGC':'R','CGA':'R','CGG':'R','AGA':'R',
'AGG':'R','TAA':'*','TAG':'*','TGA':'*'}
strSeq = ''.join(self.sequence).strip()
self.sequence = ''.join([codonTbl.get(strSeq[3*n:3*n+3], 'X') for n in range(len(strSeq)//3)])
self.length = len(self.sequence)
return self
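# Example (illustrative): for a sequence_data holding 'ATGTTTTAA', calling
# seq_translate() replaces the stored sequence with 'MF*' (codons without a
# table entry become 'X').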
def seq_revcomp (self):
from string import maketrans
strRev = ''.join(self.sequence).strip()[::-1]
self.sequence = strRev.translate(maketrans('ACTGUBVDHKMRYNSW','TGACAVBHDMKYRNSW'))
return self
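# Example (illustrative): a stored sequence of 'ACTG' becomes 'CAGT' after
# seq_revcomp() (reverse, then complement via the translation table above).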
def seq_type (self):
type_status = False
for unique_amino_characters in ['q', 'e', 'i', 'l', 'f', 'p']:
if unique_amino_characters in ''.join(self.sequence).strip().lower():
self.type = 'protein'
type_status = True
if not type_status:
self.type = 'DNA'
def internal_stop(self):
def internal_translate(strSeq):
codonTbl = {'ATT':'I','ATC':'I','ATA':'I','CTT':'L',
'CTC':'L','CTA':'L','CTG':'L','TTA':'L',
'TTG':'L','GTT':'V','GTC':'V','GTA':'V',
'GTG':'V','TTT':'F','TTC':'F','ATG':'M',
'TGT':'C','TGC':'C','GCT':'A','GCC':'A',
'GCA':'A','GCG':'A','GGT':'G','GGC':'G',
'GGA':'G','GGG':'G','CCT':'P','CCC':'P',
'CCA':'P','CCG':'P','ACT':'T','ACC':'T',
'ACA':'T','ACG':'T','TCT':'S','TCC':'S',
'TCA':'S','TCG':'S','AGT':'S','AGC':'S',
'TAT':'Y','TAC':'Y','TGG':'W','CAA':'Q',
'CAG':'Q','AAT':'N','AAC':'N','CAT':'H',
'CAC':'H','GAA':'E','GAG':'E','GAT':'D',
'GAC':'D','AAA':'K','AAG':'K','CGT':'R',
'CGC':'R','CGA':'R','CGG':'R','AGA':'R',
'AGG':'R','TAA':'*','TAG':'*','TGA':'*'}
return ''.join([codonTbl.get(strSeq[3*n:3*n+3], 'X') for n in range(len(strSeq)//3)])
test_sequence = ''
if self.type == 'unknown':
self.seq_type()
if self.type == 'DNA':
test_sequence = internal_translate(''.join(self.sequence).strip())
else:
test_sequence = ''.join(self.sequence).strip()
if not test_sequence.count('*'):
return False
else:
if test_sequence.count('*') > 1:
return True
elif not test_sequence.endswith('*'):
return True
else:
return False
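# Note (added for clarity): internal_stop() reports True when the (translated)
# sequence contains a premature stop codon, i.e. more than one '*' or a single
# '*' that is not at the very end; otherwise it reports False.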
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### class: sequence_reader - sequence reading capabilities ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
class sequence_reader(object):
def __init__(self, filename):
self.filename = filename
self.type = ''
def sequence_format (self):
with open(self.filename, 'rU') as unknown_sequence_data:
format_data = unknown_sequence_data.readline().strip()
list_format_data = format_data.split()
if len(list_format_data) > 1:
chk_num_format_data = [current_format_data.isdigit() for current_format_data in list_format_data]
if format_data.startswith('>'):
self.type = 'fasta'
elif '#NEXUS' in format_data:
self.type = 'nexus'
elif len(list_format_data) == 2 and False not in chk_num_format_data:
self.type = 'phylip'
else:
self.type = ''
def read (self):
from collections import defaultdict
import sys
sequence_reader.sequence_format(self)
if self.type == 'fasta':
def sequnce_reader (sequence_file):
from itertools import groupby
sequenceGroups = (entry[1] for entry in groupby(open(sequence_file, 'rU'), lambda line: line.startswith('>')))
for header in sequenceGroups:
header = header.next()
seq = "".join(sequence.strip() for sequence in sequenceGroups.next())
yield header, seq
for read_header, read_sequence in sequnce_reader(self.filename):
yield sequence_data(read_header, read_sequence)
elif self.type == 'phylip':
with open(self.filename, 'rU') as phylip_sequence_data:
phylip_stats = []
phylip_dict = defaultdict(list)
sequence_count = 0
for phylip_sequence_lines in phylip_sequence_data:
if not phylip_stats:
sequence_stats = phylip_sequence_lines.split()
phylip_stats = [int(current_sequence_stat) for current_sequence_stat in sequence_stats]
elif not phylip_sequence_lines.strip():
sequence_count = 0
else:
if len(phylip_sequence_lines.split()) == 2:
phylip_split = phylip_sequence_lines.strip().split()
phylip_dict[sequence_count] = [phylip_split[0], phylip_split[1]]
else:
phylip_dict[sequence_count][1] += phylip_sequence_lines.strip()
sequence_count += 1
for phylip_key, (phylip_header, phylip_sequence) in phylip_dict.items():
sequence_temp = sequence_data('>{0}\n'.format(phylip_header), phylip_sequence)
del phylip_dict[phylip_key]
yield sequence_temp
elif self.type == 'nexus':
with open(self.filename, 'rU') as nexus_sequence_data:
nexus_stats = []
nexus_dict = defaultdict(str)
nexus_data_chk, nexus_matrix_chk = False, False
for nexus_sequence_lines in nexus_sequence_data:
if nexus_data_chk and nexus_matrix_chk:
if nexus_sequence_lines.strip() == ';':
break
elif nexus_sequence_lines.strip() != '':
nexus_split = nexus_sequence_lines.strip().split()
print nexus_split
if not nexus_dict.has_key(nexus_split[0]):
nexus_dict[nexus_split[0]] = nexus_split[1]
else:
nexus_dict[nexus_split[0]] += nexus_split[1]
if 'Begin data;' in nexus_sequence_lines:
nexus_data_chk = True
if 'Matrix' in nexus_sequence_lines:
nexus_matrix_chk = True
if nexus_data_chk and not nexus_matrix_chk:
if 'ntax' in nexus_sequence_lines:
nexus_stats = [int(nexus_stat.split('=')[1]) for nexus_stat in nexus_sequence_lines.strip().replace(';','').split() if '=' in nexus_stat]
for nexus_header, nexus_sequence in nexus_dict.items():
sequence_temp = sequence_data('>{0}\n'.format(nexus_header), nexus_sequence)
del nexus_dict[nexus_header]
yield sequence_temp
else:
print 'Sequence format not recognized. Please confirm that input is correctly formatted.'
sys.exit()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### class: command_line_data - assigns defaults to multiple variables and allows for user-defined options ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
class command_line_data(object):
def __init__(self, define_location_string, define_if_input_in_dir):
#assignment variables
self.input_in_dir = define_if_input_in_dir
self.dir_location = ''
if self.input_in_dir:
self.dir_location = define_location_string.rsplit('/',1)[0]
self.input_location = define_location_string
#current variables
self.current_input = define_location_string
self.current_input_filename = define_location_string
self.current_input_dir = ''
if self.input_in_dir:
self.current_input_filename = define_location_string.rsplit('/',1)[-1]
self.current_input_dir = define_location_string.rsplit('/',1)[0]
self.current_output = ''
self.current_output_filename = ''
self.current_output_dir = ''
self.current_output_singlefile = ''
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### _____ _ ______ _ _
### / ____| | | | ____| | | (_)
### | | __ ___ _ __ ___ _ __ __ _| | | |__ _ _ _ __ ___| |_ _ ___ _ __ ___
### | | |_ |/ _ \ '_ \ / _ \ '__/ _` | | | __| | | | '_ \ / __| __| |/ _ \| '_ \/ __|
### | |__| | __/ | | | __/ | | (_| | | | | | |_| | | | | (__| |_| | (_) | | | \__ \
### \_____|\___|_| |_|\___|_| \__,_|_| |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: check_output:
### Details: handles general output assignment for VESPA functions
def check_output(file_to_verify, default_output):
if file_to_verify.input_in_dir:
if not file_to_verify.current_output_dir:
file_to_verify.current_output_dir = default_output + '_' + file_to_verify.current_input_dir
if not os.path.exists(file_to_verify.current_output_dir):
os.makedirs(file_to_verify.current_output_dir)
if not file_to_verify.current_output_filename:
file_to_verify.current_output_filename = file_to_verify.current_input_filename
else:
file_to_verify.current_output_filename = default_output + '_' + file_to_verify.current_output_filename
file_to_verify.current_output = '{0}/{1}'.format(file_to_verify.current_output_dir, file_to_verify.current_output_filename)
else:
if not file_to_verify.current_output:
file_to_verify.current_output = '{0}_{1}'.format(default_output,file_to_verify.current_input)
file_to_verify.current_output_dir = '{0}_{1}'.format(default_output,remove_extension(file_to_verify.current_input))
return file_to_verify.current_output_dir, file_to_verify.current_output_filename, file_to_verify.current_output
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: check_output_dir:
### Details: handles directory output assignment for VESPA functions
def check_output_dir(default_output):
global bme_output_directory
if not bme_output_directory:
bme_output_directory = default_output
if not os.path.exists(bme_output_directory):
os.makedirs(bme_output_directory)
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: return_filename_wo_ext:
### Details: Returns the filename, without any file extension, of the user-specified filepath
def return_filename_wo_ext(file_path):
return_file = ''
if '/' in file_path:
return_file = file_path.split('/')[-1]
if '.' in return_file:
return_file = return_file.split('.',1)[0]
else:
return_file = file_path
if '.' in return_file:
return_file = return_file.split('.',1)[0]
return return_file
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: return_filename:
### Details: Returns the filename of the user specified filepath
def return_filename(file_path):
return_file = ''
if '/' in file_path:
return_file = file_path.split('/')[-1]
else:
return_file = file_path
return return_file
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: remove_extension:
### Details: Removes file extension from the user specified filepath
def remove_extension(file_path):
return_file = ''
if '.' in file_path:
return_file = file_path.split('.',1)[0]
else:
return_file = file_path
return return_file
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: return_extension:
### Details: Returns the file extension from the user specified filepath
def return_extension(file_path):
return_file = ''
if '.' in file_path:
return_file = file_path.rsplit('.')[-1]
else:
return_file = ''
return return_file
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: return_directory:
### Details: Returns the current directory from the user specified filepath
def return_directory(file_path):
if '/' in file_path:
return file_path.rsplit('/', 1)[0]
else:
return False
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: verify_sequence_file:
### Details: Verifies that the file is a sequence file
def verify_sequence_file(file_path):
verify_sequence = sequence_reader(file_path)
verify_sequence.sequence_format()
return verify_sequence.type
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: verify_alignment:
### Details: Verifies that the sequence file is an alignment
def verify_alignment(file_path):
len_list = []
for check_sequence in sequence_reader(file_path).read():
len_list.append(len(check_sequence))
if len(set(len_list)) == 1:
return True
else:
return False
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: ensembl_infer:
### Details: Returns species for ENSEMBL sequences
def ensembl_infer(query_header):
return_species = ''
abnormal_ensembl_table = {'FB':'Fruitfly','ENSG':'Human', 'ENSCSAVG':'C_savignyi',}
ensembl_table = {'ENSAMEG':'Panda', 'ENSAPLG':'Duck', 'ENSACAG':'Anole_lizard',
'ENSAMXG':'Cave_fish', 'ENSBTAG':'Cow', 'ENSCELG':'C_elegans',
'ENSCJAG':'Marmoset', 'ENSCAFG':'Dog', 'ENSCPOG':'Guinea_Pig',
'ENSCHOG':'Sloth', 'ENSCING':'C_intestinalis','ENSXMAG':'Platyfish',
'ENSDARG':'Zebrafish', 'ENSDNOG':'Armadillo', 'ENSDORG':'Kangaroo_rat',
'ENSETEG':'Lesser_hedgehog_tenrec', 'ENSECAG':'Horse', 'ENSEEUG':'Hedgehog',
'ENSFCAG':'Cat', 'ENSFALG':'Flycatcher', 'ENSGMOG':'Cod',
'ENSGALG':'Chicken', 'ENSGACG':'Stickleback', 'ENSGGOG':'Gorilla',
'ENSSTOG':'Squirrel', 'ENSLACG':'Coelacanth', 'ENSLOCG':'Spotted_gar',
'ENSLAFG':'Elephant', 'ENSMMUG':'Macaque', 'ENSMEUG':'Wallaby',
'ENSMGAG':'Turkey', 'ENSMICG':'Mouse_Lemur', 'ENSMODG':'Opossum',
'ENSMUSG':'Mouse', 'ENSMPUG':'Ferret', 'ENSMLUG':'Microbat',
'ENSNLEG':'Gibbon', 'ENSOPRG':'Pika', 'ENSONIG':'Tilapia',
'ENSOANG':'Platypus', 'ENSOCUG':'Rabbit', 'ENSORLG':'Medaka',
'ENSOGAG':'Bushbaby', 'ENSOARG':'Sheep', 'ENSPTRG':'Chimpanzee',
'ENSPSIG':'Chinese_softshell_turtle', 'ENSPMAG':'Lamprey', 'ENSPPYG':'Orangutan',
'ENSPCAG':'Hyrax', 'ENSPVAG':'Megabat', 'ENSRNOG':'Rat',
'ENSSCEG':'S_cerevisiae', 'ENSSHAG':'Tasmanian_devil', 'ENSSARG':'Shrew',
'ENSSSCG':'Pig', 'ENSTGUG':'Zebra Finch', 'ENSTRUG':'Fugu',
'ENSTSYG':'Tarsier', 'ENSTNIG':'Tetraodon', 'ENSTBEG':'Tree Shrew',
'ENSTTRG':'Dolphin', 'ENSVPAG':'Alpaca', 'ENSXETG':'Xenopus'}
if ensembl_table.has_key(query_header[1:8]):
return_species = ensembl_table[query_header[1:8]]
else:
for check_species in abnormal_ensembl_table.keys():
if check_species in query_header:
return_species = abnormal_ensembl_table[check_species]
return return_species
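# Example (illustrative): a header such as '>ENSMUSG00000064370 ...' maps to
# 'Mouse' via the seven-character prefix lookup; headers with no matching prefix
# fall back to the abnormal_ensembl_table substring check and may return an
# empty string.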
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: create_unique_file:
### Details: Creates a file; if the file already exists a number is added until the filename is unique
def create_unique_file(requested_filename):
import os
if '.' in requested_filename:
filename = requested_filename.strip().rsplit('.',1)[0]
filename_extension = requested_filename.strip().rsplit('.',1)[1]
if os.path.isfile('{0}.{1}'.format(filename, filename_extension)):
counter = 1
while os.path.isfile('{0}_{1}.{2}'.format(filename, counter, filename_extension)):
counter += 1
return open('{0}_{1}.{2}'.format(filename, counter, filename_extension), 'w')
else:
return open('{0}.{1}'.format(filename, filename_extension), 'w')
else:
filename = requested_filename.strip()
if os.path.isfile('{0}'.format(filename)):
counter = 1
while os.path.isfile('{0}_{1}'.format(filename, counter)):
counter += 1
return open('{0}_{1}'.format(filename, counter), 'w')
else:
return open('{0}'.format(filename), 'w')
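# Example (illustrative): if 'results.txt' already exists, the call returns a
# handle to 'results_1.txt' (then 'results_2.txt', and so on); extensionless
# names get the counter appended directly, e.g. 'results_1'.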
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: check_if_input_directory:
### Details: Checks if specified filepath exists and if so, check if the specified filepath is a file or directory.
def check_if_input_directory (input_variable):
	if os.path.isdir(input_variable):
		return True
	elif os.path.isfile(input_variable):
		return False
	else:
		print 'Input file or directory ({0}) does not exist - exiting program'.format(input_variable)
		sys.exit()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### _____ _ _ _____ _
### | ____| | | | | __ \| |
### | |__ | |_| |__ | |__) | |__ __ _ ___ ___
### |___ \| __| '_ \ | ___/| '_ \ / _` / __|/ _ \
### ___) | |_| | | | | | | | | | (_| \__ \ __/
### |____/ \__|_| |_| |_| |_| |_|\__,_|___/\___|
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_codeml_reader:
### Details: VESPA codeML output reader
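### Note: the function expects codeml results laid out by the VESPA setup step (paths containing an
### 'Omega' directory). CreateCodemlReports.pl is called on each result directory, and the parsed
### 'PosSites' reports are combined with the original alignments (-alignment_path) to write FASTA
### files that flag positively selected sites ('N' for nucleotide and 'X' for protein alignments).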
def vespa_codeml_reader (input_files):
print 'VESPA: CodeML Reader'
import os, sys, glob
from collections import defaultdict
def codeml_raw_reader(input_files):
import subprocess, os
codeml_reader_output = create_unique_file('codeml_reader.log')
report_list, return_list = [], []
for raw_codeml_input in input_files:
omega_position = 0
split_codeml = raw_codeml_input.current_input.strip().split('/')
for pos, string in enumerate(split_codeml):
if 'Omega' in string:
omega_position = len(split_codeml) - (pos - 2)
if omega_position != 0:
report_list.append(raw_codeml_input.current_input.strip().rsplit('/',omega_position)[0])
report_list = list(set(report_list))
for orginal_path in report_list:
report_split = orginal_path.split('/',1)
codeml_wrapper_check, codeml_wrapper_bin = False, False
if not bme_output_directory:
report_output_dir = 'Report_{0}'.format(report_split[0])
check_output_dir(report_output_dir)
report_path = '{0}/{1}'.format(bme_output_directory,report_split[1])
if not os.path.exists(report_path):
os.makedirs(report_path)
if not codeml_wrapper_check:
try:
codeml_wrapper_call = subprocess.Popen(['CreateCodemlReports.pl', orginal_path, report_path, '-overwrite=Yes'], stdout=subprocess.PIPE,stderr=subprocess.PIPE, stdin=subprocess.PIPE)
codeml_wrapper_check = True
except:
codeml_wrapper_bin = True
if codeml_wrapper_bin:
try:
codeml_wrapper_call = subprocess.Popen(['perl', 'CreateCodemlReports.pl', orginal_path, report_path, '-overwrite=Yes'], stdout=subprocess.PIPE,stderr=subprocess.PIPE, stdin=subprocess.PIPE)
codeml_wrapper_check = True
except:
pass
if codeml_wrapper_check:
wrapper_out, wrapper_error = codeml_wrapper_call.communicate()
if not wrapper_error:
codeml_reader_output.write('Currently Parsing: {0}\n'.format(orginal_path))
codeml_reader_output.write(wrapper_out)
for path, sub_dirs, file_list in os.walk(report_path):
for files in file_list:
if not files.startswith('.'):
return_list.append(os.path.join(path, files))
else:
print wrapper_error
codeml_reader_output.write('Error running CreateCodemlReports.pl, please confirm that the script and all modules are installed.\n')
else:
codeml_reader_output.write('Error running CreateCodemlReports.pl, please confirm that the script and all modules are installed.\n')
codeml_reader_output.close()
return return_list
def append_character_data(codeml_matched_site_apd, extand_lineage_seq_apd, ps_char_seq_apd, seq_type_apd):
if seq_type_apd == 'DNA':
ps_char_seq_apd[int(codeml_matched_site_apd) * 3] = extand_lineage_seq_apd[int(codeml_matched_site_apd) * 3]
ps_char_seq_apd[(int(codeml_matched_site_apd) * 3) + 1] = extand_lineage_seq_apd[(int(codeml_matched_site_apd) * 3) + 1]
ps_char_seq_apd[(int(codeml_matched_site_apd) * 3) + 2] = extand_lineage_seq_apd[(int(codeml_matched_site_apd) * 3) + 2]
elif seq_type_apd == 'protein':
			ps_char_seq_apd[int(codeml_matched_site_apd)] = extand_lineage_seq_apd[int(codeml_matched_site_apd)]
return ps_char_seq_apd
def append_site_data(codeml_matched_site_apd, ps_site_seq_apd, seq_type_apd):
if seq_type_apd == 'DNA':
ps_site_seq_apd[int(codeml_matched_site_apd) * 3] = 'N'
ps_site_seq_apd[(int(codeml_matched_site_apd) * 3) + 1] = 'N'
ps_site_seq_apd[(int(codeml_matched_site_apd) * 3) + 2] = 'N'
elif seq_type_apd == 'protein':
ps_site_seq_apd[int(codeml_matched_site_apd)] = 'X'
return ps_site_seq_apd
global bme_branch_label_table, bme_alignment_path, bme_output_directory
branch_models = False
sequence_type = ''
sequence_input_files = []
if bme_branch_label_table:
if os.path.isfile(bme_branch_label_table):
branch_models = True
ancestral_lineage_matcher = defaultdict(list)
with open(bme_branch_label_table, 'rU') as label_data:
for label_lines in label_data:
if ':' in label_lines:
ancestral_lineage_matcher[label_lines.strip().split(': ')[0]].extend([temp_labels.strip() for temp_labels in label_lines.strip().split(': ')[-1].split(',')])
else:
ancestral_lineage_matcher[label_lines.strip()].append(label_lines.strip())
else:
print 'Could not locate specified label table file, please check'
sys.exit()
else:
		print 'Branch labels not specified, please include the option: -label_table=USR_TBL. Only site-model alignments will be created'
if bme_alignment_path:
if check_if_input_directory(bme_alignment_path):
for path, sub_dirs, file_list in os.walk(bme_alignment_path):
for files in file_list:
if not files.startswith('.'):
sequence_input_files.append(os.path.join(path, files))
else:
sequence_input_files.append(bme_alignment_path)
else:
print 'Alignments path not specified, please use -alignment_path='
sys.exit()
report_files = codeml_raw_reader(input_files)
for sequence_input in sequence_input_files:
sequence_type, current_sequence_file = '', return_filename_wo_ext(sequence_input)
current_sequences = {}
for working_sequence in sequence_reader(sequence_input).read():
current_sequences[working_sequence.header.split('|')[0][1:]] = working_sequence
if not sequence_type:
working_sequence.seq_type()
sequence_type = working_sequence.type
for codeml_input in report_files:
if current_sequence_file in codeml_input.split(os.sep):
if 'PosSites' in codeml_input:
if branch_models:
if 'modelA.txt' in codeml_input:
current_lineage = []
for identifier_labels in ancestral_lineage_matcher.keys():
if identifier_labels in return_filename(codeml_input):
ps_character_seqeunce_dict = {}
get_length = []
for extant_species in ancestral_lineage_matcher[identifier_labels]:
if current_sequences.has_key(extant_species):
get_length.append(len(current_sequences[extant_species]))
ps_character_seqeunce_dict[extant_species] = list('-' * len(current_sequences[extant_species]))
if len(set(get_length)) == 1:
ps_site_seqeunce = list('-' * get_length[0])
with open(codeml_input, 'rU') as codeml_data:
for codeml_lines in codeml_data:
if 'P(w>1) > 0.5' not in codeml_lines:
codeml_site_data = codeml_lines.strip().split()
codeml_matched_site = int(codeml_site_data[0]) - 1
ps_site_seqeunce = append_site_data(codeml_matched_site, ps_site_seqeunce, sequence_type)
for extant_species in ancestral_lineage_matcher[identifier_labels]:
if current_sequences.has_key(extant_species):
ps_character_seqeunce_dict[extant_species] = append_character_data(codeml_matched_site, current_sequences[extant_species].sequence, ps_character_seqeunce_dict[extant_species], sequence_type)
codeml_alignment = open('{0}.fasta'.format(remove_extension(codeml_input)), 'w')
codeml_alignment.write(str(sequence_data('>PS_Sites|{0}\n'.format(identifier_labels),''.join(ps_site_seqeunce))) + '\n')
for keys in current_sequences.keys():
if keys in ancestral_lineage_matcher[identifier_labels]:
codeml_alignment.write(str(sequence_data('>PS_Characters|' + current_sequences[keys].header[1:],''.join(ps_character_seqeunce_dict[keys]))) + '\n')
codeml_alignment.write(str(current_sequences[keys]) + '\n')
for keys in current_sequences.keys():
if keys not in ancestral_lineage_matcher[identifier_labels]:
codeml_alignment.write(str(current_sequences[keys]) + '\n')
codeml_alignment.close()
else:
										print 'Error: unaligned sequences (unequal lengths) detected for lineage {0} in {1}'.format(identifier_labels, current_sequence_file)
elif '.txt' in codeml_input:
get_length = [len(current_sequences[keys]) for keys in current_sequences.keys()]
if len(set(get_length)) == 1:
ps_site_seqeunce = list('-' * get_length[0])
with open(codeml_input, 'rU') as codeml_data:
for codeml_lines in codeml_data:
if 'P(w>1) > 0.5' not in codeml_lines:
codeml_site_data = codeml_lines.strip().split()
codeml_matched_site = int(codeml_site_data[0]) - 1
ps_site_seqeunce = append_site_data(codeml_matched_site, ps_site_seqeunce, sequence_type)
codeml_alignment = open('{0}.fasta'.format(remove_extension(codeml_input)), 'w')
codeml_alignment.write(str(sequence_data('>PS_Sites\n',''.join(ps_site_seqeunce))) + '\n')
for keys in current_sequences.keys():
codeml_alignment.write(str(current_sequences[keys]) + '\n')
codeml_alignment.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### _ _ _ _ _____ _
### | || | | | | | | __ \| |
### | || |_| |_| |__ | |__) | |__ __ _ ___ ___
### |__ _| __| '_ \ | ___/| '_ \ / _` / __|/ _ \
### | | | |_| | | | | | | | | | (_| \__ \ __/
### |_| \__|_| |_| |_| |_| |_|\__,_|___/\___|
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_branch_table:
### Details: VESPA function that creates a branch label table from a species tree
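### Note: the user interactively selects leaves (option 1) or ancestral lineages (option 2) from the
### plotted species tree; selections are written to a 'branch_table.txt' file with one line per leaf
### and 'label: species1, species2, ...' lines for internal nodes, the format read by the codeml setup step.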
def vespa_branch_table(input_files):
print 'VESPA: Branch Table Creator'
import os, sys, dendropy, re, copy
from collections import defaultdict
def select_subtree(sent_tree):
pre_edited_tree = dendropy.Tree.get_from_string(sent_tree,"newick")
finished_node_selection = False
return_nodes = defaultdict(list)
while not finished_node_selection:
user_selected_node = raw_input('Please select a node for selection (numerical values): ')
if user_selected_node in [internal_nodes.label for internal_nodes in pre_edited_tree.internal_nodes()]:
user_confirm = raw_input('Node {0} found. Please confirm (y / n): '.format(user_selected_node))
if user_confirm.lower().startswith('y'):
edited_tree = copy.deepcopy(pre_edited_tree)
edited_node = edited_tree.find_node_with_label(user_selected_node)
edited_leafs = [leaf_nodes.taxon for leaf_nodes in edited_node.leaf_iter()]
edited_leaf_taxa = [leaf_nodes.label for leaf_nodes in edited_leafs]
edited_tree.retain_taxa(edited_leafs)
confirm_ID = False
while not confirm_ID:
node_ID = raw_input('Please specify a label for this branch (used for codeml analysis): ')
if node_ID:
id_confirm = raw_input('Label: {0}? Please confirm (y / n): '.format(node_ID))
if id_confirm.lower().startswith('y'):
confirm_ID = True
return_nodes[node_ID] = copy.deepcopy(edited_leaf_taxa)
another_node_check = raw_input('Select an additional branch? Please confirm (y / n): ')
if another_node_check.lower().startswith('n'):
finished_node_selection = True
return return_nodes
def select_leaf(sent_taxa):
finished_leaf_selection = False
return_list = []
while not finished_leaf_selection:
			user_selected_leafs = raw_input('Please select a leaf (taxa) for selection (if multiple, separate with commas): ')
leaf_list = [current_leaf.strip() for current_leaf in user_selected_leafs.split(',')]
if len(leaf_list) == len(list(set(leaf_list) & set(sent_taxa))):
user_confirm = raw_input('Leaf(s) {0} found. Please confirm (y / n): '.format(', '.join(leaf_list)))
if user_confirm.lower().startswith('y'):
finished_leaf_selection = True
return_list = copy.deepcopy(leaf_list)
else:
unknown_leafs = list(set(leaf_list) - set(set(leaf_list) & set(sent_taxa)))
print 'Leaf(s) {0} not found. Please confirm taxa spelling and formatting.'.format(', '.join(unknown_leafs))
return return_list
for species_tree in input_files:
def tree_labeler (sent_tree):
labeler = list(sent_tree)
for pos, matched_nodes in enumerate([match.end() for match in re.finditer('\)', sent_tree)][::-1]):
labeler.insert(matched_nodes,str(pos))
return ''.join(labeler) + ';'
branch_file = create_unique_file('branch_table.txt')
saved_leafs = []
saved_braches = []
data_tree = dendropy.Tree.get_from_path(species_tree.current_input,"newick")
original_tree = data_tree.as_string(schema="newick")
original_taxa = [str(taxa_ids).replace("'",'') for taxa_ids in data_tree.taxon_namespace]
screen_tree = dendropy.Tree.get_from_string(tree_labeler(original_tree),"newick")
screen_tree.print_plot(show_internal_node_labels=True)
screen_string = screen_tree.as_string(schema="newick")
command_dict = {'0':'Finished', '1':'Species Selection', '2':'Ancestral Lineage Selection'}
finished_selection = False
while not finished_selection:
print 'Possible actions\n____________________\n1. Species Selection\n2. Ancestral Lineage Selection\n0. Finished\n'
user_request = raw_input('Please select an action ( 0 / 1 / 2 ): ')
if user_request in ['0', '1', '2']:
user_confirm = raw_input(command_dict[user_request] + '. Please confirm (y / n): ')
if user_confirm.lower().startswith('y'):
if user_request == '1':
saved_leafs.extend(select_leaf(original_taxa))
elif user_request == '2':
saved_braches.append(select_subtree(screen_string))
elif user_request == '0':
finished_selection = True
for current_leaf in saved_leafs:
branch_file.write(current_leaf + '\n')
for current_dict in saved_braches:
for label, species in current_dict.items():
branch_file.write('{0}: {1}\n'.format(label,', '.join(species)))
branch_file.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_map_protein_gaps:
### Details: Creates nucleotide alignments using protein alignments and a nucleotide sequence database
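### Note: gap columns in each protein alignment are mapped back onto the matching nucleotide
### sequences taken from the sequence database (bme_sequence_database_location); a trailing '*' is
### removed from the protein and a terminal stop codon (TAA/TAG/TGA) from the nucleotide sequence
### before the lengths are compared.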
def vespa_map_protein_gaps(input_files):
print 'VESPA: Mapping Nucleotide Alignments'
from collections import defaultdict
global bme_sequence_database_location
alignment_counter, alignment_map, protein_map = (defaultdict(list), defaultdict(list), defaultdict(list))
def cleave_stop_codon(sequence):
stop_codon_list = ['TAA','TAG','TGA']
if sequence[-3:] in stop_codon_list:
return sequence[:-3]
else:
return sequence
if not bme_sequence_database_location:
print 'Sequence database not found'
sys.exit()
for sequence_input in input_files:
(mapped_output_directory, mapped_output_filename, mapped_output_path) = check_output(sequence_input, 'Map_Gaps')
for working_sequence in sequence_reader(sequence_input.current_input).read():
mapped_output_path = '{0}/{1}'.format(mapped_output_directory, return_filename(sequence_input.current_input))
amino_acid_positions = []
if working_sequence.sequence[-1] == '*':
working_sequence.sequence = working_sequence.sequence[:-1]
for position, amino_acid in enumerate(working_sequence.sequence):
if amino_acid != '-':
amino_acid_positions.extend([position * 3, (position * 3) + 1, (position * 3) + 2])
protein_map[working_sequence.header] = amino_acid_positions
alignment_map[working_sequence.header] = [mapped_output_path, len(working_sequence) * 3, len(working_sequence.sequence.replace('-','')) * 3]
alignment_counter[mapped_output_path].append(working_sequence.header)
for mapped_alignments in alignment_counter.keys():
try:
with open(mapped_alignments): os.remove(mapped_alignments)
except IOError:
pass
verify_database = False
for genome_sequence in sequence_reader(bme_sequence_database_location).read():
if not verify_database:
verify_database = True
genome_sequence.seq_type()
if genome_sequence.type == 'protein':
print 'Protein database detected, please use a nucleotide database'
sys.exit()
if alignment_map.has_key(genome_sequence.header):
alignment_output_filename = alignment_map[genome_sequence.header][0]
protein_adjested_sequence = cleave_stop_codon(genome_sequence.sequence)
if len(protein_adjested_sequence) == alignment_map[genome_sequence.header][2]:
nucleotide_alignment = list(alignment_map[genome_sequence.header][1] * '-')
for nucleotide, position in zip(protein_adjested_sequence, protein_map[genome_sequence.header]):
nucleotide_alignment[position] = nucleotide
sequence_output = open(alignment_output_filename, 'a')
sequence_output.write(str(sequence_data(genome_sequence.header, ''.join(nucleotide_alignment))) + '\n')
sequence_output.close()
else:
print 'Sequence length differences identified, please verify that the protein sequences have not been altered since translation'
sys.exit()
for alignment_files in alignment_counter.keys():
written_header_list = []
with open(alignment_files) as alignment_data:
for alignment_lines in alignment_data:
if alignment_lines.startswith('>'):
written_header_list.append(alignment_lines)
if len(written_header_list) != len(alignment_counter[alignment_files]):
convert_list = []
for written_header in written_header_list:
convert_list.append(written_header.replace(' ', '_').replace(':', '_').replace('(', '_').replace(')', '_').replace(',', '_'))
print 'Error in:', alignment_files + ',' + ','.join([print_id.strip() for print_id in list(set(alignment_counter[alignment_files]) - set(convert_list))])
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_mrbayes_reader:
### Details: Reads MrBayes NEXUS output and converts the tree to Newick format
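### Note: only the 'begin trees;' block of the NEXUS file is parsed; branch lengths and support
### values are stripped with regular expressions and a single deduplicated tree is written to a
### matching '.tre' file.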
def vespa_mrbayes_reader (input_files):
print 'VESPA: Reading MrBayes Output'
import re
for alignment_input in input_files:
(mrbayes_output_dir, mrbayes_output_filename, mrbayes_output) = check_output(alignment_input, 'MrBayes_Reader')
newick_output = open(remove_extension(mrbayes_output) + '.tre', 'w')
with open(alignment_input.current_input) as nexus_data:
check_nexus = nexus_data.readline()
tree_list = []
if check_nexus.strip() == '#NEXUS':
parsing_trees = False
for nexus_lines in nexus_data:
if 'end;' in nexus_lines:
parsing_trees = False
elif parsing_trees:
if nexus_lines.strip().startswith('tree'):
current_tree = nexus_lines.strip().split('=')[1].strip()
tree_list.append(re.sub('\)\d.\d+', ')',re.sub(':\d.\d+', '', current_tree)))
elif 'begin trees;' in nexus_lines:
parsing_trees = True
if len(set(tree_list)):
newick_output.write('{0}\n'.format(list(set(tree_list))[0]))
else:
				print 'Error parsing NEXUS file. Please confirm the file is from MrBayes.'
newick_output.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_infer_genetree:
### Details: Function that generates gene trees using a specified species tree
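### Note: for each aligned gene family the species tree (option -species_tree) is pruned to the
### species present in the alignment and the species names are replaced by the matching gene
### headers; with in-paralogs enabled, multiple genes from one species are grouped into a subtree
### for that species.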
def vespa_infer_genetree (input_files):
print 'VESPA: Inferring GeneTree'
import dendropy, re, os, shutil
global bme_species_tree, bme_in_paralogs
if bme_species_tree:
if os.path.isfile(bme_species_tree):
try:
species_tree = dendropy.Tree.get(path=bme_species_tree, schema="newick", preserve_underscores=True)
except:
print 'Error reading: {0}. Please verify that species tree is in newick format'.format(bme_species_tree)
sys.exit()
taxa_list = [str(taxa_ids).replace("'",'') for taxa_ids in species_tree.taxon_namespace]
error_log = create_unique_file('infer_genetree.log')
for alignment_input in input_files:
sequence_counter, in_paralog_counter = 0, 0
if verify_alignment(alignment_input.current_input):
convert_tree = dendropy.Tree(species_tree)
taxa_check = dict([(taxa,'') for taxa in taxa_list])
for working_sequence in sequence_reader(alignment_input.current_input).read():
sequence_counter += 1
if working_sequence.header.split('|')[0][1:] in taxa_list:
if taxa_check[working_sequence.header.split('|')[0][1:]]:
if bme_in_paralogs:
in_paralog_counter += 1
if '(' in taxa_check[working_sequence.header.split('|')[0][1:]]:
current_leaf = taxa_check[working_sequence.header.split('|')[0][1:]][1:-1]
else:
current_leaf = taxa_check[working_sequence.header.split('|')[0][1:]]
taxa_check[working_sequence.header.split('|')[0][1:]] = '({0},{1})'.format(current_leaf, working_sequence.header.strip()[1:])
else:
print 'Duplication detected: {0}. Please check files'.format(working_sequence.header.strip()[1:])
error_log.write('Duplication detected: {0}. Please check files\n'.format(working_sequence.header.strip()[1:]))
else:
taxa_check[working_sequence.header.split('|')[0][1:]] = working_sequence.header.strip()[1:]
else:
print 'Non-tree species detected: {0}. Please check files'.format(working_sequence.header.strip()[1:])
error_log.write('Non-tree species detected: {0}. Please check files\n'.format(working_sequence.header.strip()[1:]))
if '' in taxa_check.values():
nodes_to_prune = [convert_tree.find_node_with_taxon_label(taxa).taxon for taxa in taxa_check.keys() if taxa_check[taxa] == '']
convert_tree.prune_taxa(nodes_to_prune)
pruned_tree = convert_tree.as_string(schema="newick").replace("'",'')
pruned_taxa = dendropy.Tree.get(data=pruned_tree, schema="newick").taxon_namespace.labels()
if len(pruned_taxa) == (sequence_counter - in_paralog_counter):
for raw_taxa, gene in taxa_check.items():
if gene:
replace_finder = re.search(raw_taxa +"(\)|\,|;)", pruned_tree)
pruned_tree = pruned_tree.replace(replace_finder.group(), replace_finder.group().replace(raw_taxa, gene))
(inferred_output_dir, inferred_output_filename, inferred_output) = check_output(alignment_input, 'Inferred_Genetree')
current_filename = return_filename_wo_ext(alignment_input.current_input)
sub_output_path = '{0}/{1}'.format(inferred_output_dir,current_filename)
if not os.path.exists(sub_output_path):
os.makedirs(sub_output_path)
shutil.copy(alignment_input.current_input,sub_output_path)
tree_file = open('{0}/{1}.tre'.format(sub_output_path,current_filename), 'w')
tree_file.write(pruned_tree)
tree_file.close()
else:
error_log.write('Error pruning: {0}.tre\n'.format(remove_extension(alignment_input.current_input)))
else:
					print 'Unaligned sequences detected in: {0}'.format(return_filename(alignment_input.current_input))
					error_log.write('Unaligned sequences detected in: {0}\n'.format(return_filename(alignment_input.current_input)))
error_log.close()
else:
print 'Could not locate specified tree file, please check'
sys.exit()
else:
print 'Species tree not specified, please include the option: -species_tree=USR_TRE'
sys.exit()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_link_input:
### Details: Combines alignment and tree data for Codeml
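### Note: each gene tree is paired with the alignment of the same filename (from -alignment_path)
### and both files are copied into a 'Linked' output directory, provided the taxa in the tree and
### the alignment headers match exactly.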
def vespa_link_input(input_files):
print 'VESPA: Linking Input'
import dendropy, os, sys, shutil
global bme_alignment_path
if bme_alignment_path:
error_log = create_unique_file('link_input.log')
for tree_files in input_files:
taxa_list = []
try:
gene_tree = dendropy.Tree.get_from_path(tree_files.current_input, schema="newick",preserve_underscores=True)
taxa_list = [str(taxa_ids).replace("'",'') for taxa_ids in gene_tree.taxon_namespace]
except IOError:
print 'Non-Tree file detected: {0}'.format(return_filename(tree_files.current_input))
error_log.write('Non-Tree file detected: {0}\n'.format(return_filename(tree_files.current_input)))
if taxa_list:
alignment_files = []
if check_if_input_directory(bme_alignment_path):
for path, sub_dirs, file_list in os.walk(bme_alignment_path):
for files in file_list:
if not files.startswith('.'):
alignment_files.append(os.path.join(path, files))
else:
alignment_files.append(bme_alignment_path)
for alignment_input in alignment_files:
alignment_taxa = []
if verify_alignment(alignment_input):
if return_filename_wo_ext(tree_files.current_input) == return_filename_wo_ext(alignment_input):
for working_sequence in sequence_reader(alignment_input).read():
alignment_taxa.append(working_sequence.header.strip()[1:])
if set(taxa_list) == set(alignment_taxa):
(linked_dir, linked_file, linked_output) = check_output(tree_files, 'Linked')
if not os.path.exists(linked_dir):
os.makedirs(linked_dir)
if tree_files.input_in_dir:
sub_output_dir = '{0}/{1}'.format(linked_dir,remove_extension(return_filename(tree_files.current_input)))
if not os.path.exists(sub_output_dir):
os.makedirs(sub_output_dir)
shutil.copy(tree_files.current_input,sub_output_dir)
shutil.copy(alignment_input,sub_output_dir)
else:
shutil.copy(tree_files.current_input,linked_dir)
shutil.copy(alignment_input,linked_dir)
else:
						print 'Unaligned sequences detected in: {0}'.format(return_filename(alignment_input))
						error_log.write('Unaligned sequences detected in: {0}\n'.format(return_filename(alignment_input)))
error_log.close()
else:
print 'Alignment directory not specified, please include the option: -alignment_path=USR_DIR'
sys.exit()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_codeml_setup:
### Details: Takes combined alignment and tree data and prepares the data for Codeml
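### Note: for each linked alignment/tree pair the branch label table (option -label_table) is used
### to write one labelled tree per foreground branch ('#1' labels), GenerateCodemlWorkspace.pl is
### then called to build the codeml directories, and a 'codeml_taskfarm.txt' file of
### 'cd <dir>; codeml' commands is written for batch execution.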
def vespa_codeml_setup(input_files):
print 'VESPA: CodeML Setup'
import dendropy, os, sys, subprocess, copy
from collections import defaultdict
global bme_in_paralogs, bme_branch_label_table
error_log = create_unique_file('codeml_setup.log')
taskfarm_log = create_unique_file('codeml_taskfarm.txt')
branch_models = False
alignment_found_check, tree_found_check = False, False
if bme_branch_label_table:
if os.path.isfile(bme_branch_label_table):
branch_models = True
label_dict = defaultdict(list)
with open(bme_branch_label_table, 'rU') as label_data:
for label_lines in label_data:
if ':' in label_lines:
label_dict['node'].append([label_lines.strip().split(': ')[0], [temp_labels.strip() for temp_labels in label_lines.strip().split(': ')[-1].split(',')]])
else:
label_dict['leaf'].append(label_lines.strip())
else:
print 'Could not locate specified label table file, please check'
sys.exit()
else:
print 'Branch labels not specified, please include the option: -label_table=USR_TBL. Setup will be site models only'
error_log.write('Branch labels not specified, please include the option: -label_table=USR_TBL. Setup will be site models only\n')
for alignment_input in input_files:
if verify_sequence_file(alignment_input.current_input) != '':
species_convert = defaultdict(list)
if verify_alignment(alignment_input.current_input):
alignment_found_check = True
taxa_list = []
for tree_files in input_files:
if remove_extension(tree_files.current_input) == remove_extension(alignment_input.current_input):
if tree_files.current_input != alignment_input.current_input:
try:
tree_input = tree_files.current_input
original_tree = dendropy.Tree.get_from_path(tree_input, schema="newick",preserve_underscores=True)
taxa_list = [str(taxa_ids).replace("'",'') for taxa_ids in original_tree.taxon_namespace]
except IOError:
pass
if taxa_list:
tree_found_check = True
if bme_in_paralogs:
for taxa in taxa_list:
species_convert[taxa.split('|')[0]].append(taxa)
else:
if len(set([taxa.split('|')[0] for taxa in taxa_list])) == len(taxa_list):
for taxa in taxa_list:
species_convert[taxa.split('|')[0]].append(taxa)
else:
print 'Duplication detected: {0}. Please check file'.format(alignment_input.current_input)
error_log.write('Duplication detected: {0}. Please check file\n'.format(alignment_input.current_input))
continue
codeml_trees = [tree_input]
if branch_models:
if label_dict.has_key('leaf'):
for species in label_dict['leaf']:
if species_convert.has_key(species):
tree_file_name = '{0}_{1}.tre'.format(remove_extension(tree_input), species)
if len(species_convert[species]) > 1:
working_tree = copy.deepcopy(original_tree)
mrca_node = working_tree.mrca(taxon_labels=species_convert[species])
mrca_leafs = [leaf_nodes.taxon for leaf_nodes in mrca_node.leaf_iter()]
mrca_leaf_taxa = [leaf_nodes.label for leaf_nodes in mrca_leafs]
mrca_tree = dendropy.Tree(working_tree)
mrca_tree.retain_taxa(mrca_leafs)
mrca_newick = mrca_tree.as_string(schema="newick", suppress_rooting=True).strip().replace(';','')
if set(mrca_leaf_taxa) == set(species_convert[species]):
label_tree = original_tree.as_string(schema="newick", suppress_rooting=True)
label_tree = label_tree.replace("'",'').replace(mrca_newick.replace("'",''), mrca_newick.replace("'",'') + "'#1'")
tree_file = open(tree_file_name, 'w')
tree_file.write(label_tree)
tree_file.close()
codeml_trees.append(tree_file_name)
else:
print 'Unable to label: {0}. Additional genes present'.format(tree_file_name)
error_log.write('Unable to label: {0}. Additional genes present\n'.format(tree_file_name))
for gene_conversions in species_convert[species]:
paralog_file_name = '{0}_{1}.tre'.format(remove_extension(tree_input), gene_conversions.split('|')[1].strip())
label_tree = original_tree.as_string(schema="newick", suppress_rooting=True)
current_tree = label_tree.replace(gene_conversions, gene_conversions + '#1').replace("'",'')
tree_file = open(paralog_file_name, 'w')
tree_file.write(current_tree)
tree_file.close()
codeml_trees.append(paralog_file_name)
else:
for gene_conversions in species_convert[species]:
label_tree = original_tree.as_string(schema="newick", suppress_rooting=True)
current_tree = label_tree.replace(gene_conversions, gene_conversions + '#1').replace("'",'')
tree_file = open(tree_file_name, 'w')
tree_file.write(current_tree)
tree_file.close()
codeml_trees.append(tree_file_name)
else:
print 'Unable to label {0} for {1}. Cannot find {0} in genetree. {0} will not be included in codeML analysis for {1}.'.format(species, return_filename(tree_input))
error_log.write('Unable to label {0} for {1}. Cannot find {0} in genetree. {0} will not be included in codeML analysis for {1}.\n'.format(species, return_filename(tree_input)))
if label_dict.has_key('node'):
for node_data in label_dict['node']:
tree_file_name = remove_extension(tree_input) + '_' + node_data[0] + '.tre'
species_list = node_data[1]
node_genes, nodes_found = ([], [])
paralog_counter = 0
for node_species in species_list:
if species_convert.has_key(node_species):
if bme_in_paralogs:
if len(species_convert[node_species]) > 1:
paralog_counter += (len(species_convert[node_species]) - 1)
for append_genes in species_convert[node_species]:
node_genes.append(append_genes)
nodes_found.append(node_species)
else:
print 'Unable to label {0} for {1}. Cannot find {2} in genetree. {0} will not be included in codeML analysis for {1}.'.format(node_data[0], return_filename(tree_input), node_species)
error_log.write('Unable to label {0} for {1}. Cannot find {2} in genetree. {0} will not be included in codeML analysis for {1}.\n'.format(node_data[0], return_filename(tree_input), node_species))
if (len(node_genes) - paralog_counter) == len(species_list):
working_tree = copy.deepcopy(original_tree)
mrca_node = working_tree.mrca(taxon_labels=node_genes)
mrca_leafs = [leaf_nodes.taxon for leaf_nodes in mrca_node.leaf_iter()]
mrca_leaf_taxa = [leaf_nodes.label for leaf_nodes in mrca_leafs]
mrca_tree = dendropy.Tree(working_tree)
mrca_tree.retain_taxa(mrca_leafs)
mrca_newick = mrca_tree.as_string(schema="newick", suppress_rooting=True).strip().replace(';','')
if set(mrca_leaf_taxa) == set(node_genes):
label_tree = original_tree.as_string(schema="newick", suppress_rooting=True)
label_tree = label_tree.replace("'",'').replace(mrca_newick.replace("'",''), mrca_newick.replace("'",'') + "'#1'")
tree_file = open(tree_file_name, 'w')
tree_file.write(label_tree)
tree_file.close()
codeml_trees.append(tree_file_name)
else:
print 'Unable to label: {0}. Additional genes present'.format(node_data)
error_log.write('Unable to label: {0}. Additional genes present\n'.format(node_data))
else:
reduced_list_not_already_labeled = False
for check_nodes in label_dict['node']:
if node_data[0] != check_nodes[0]:
if set(nodes_found) == set(check_nodes[1]):
reduced_list_not_already_labeled = True
if len(nodes_found) == 1:
for check_leaves in label_dict['leaf']:
if str(nodes_found[0]) == str(check_leaves):
reduced_list_not_already_labeled = True
if not reduced_list_not_already_labeled and len(nodes_found) != 0:
working_tree = copy.deepcopy(original_tree)
mrca_node = working_tree.mrca(taxon_labels=node_genes)
mrca_leafs = [leaf_nodes.taxon for leaf_nodes in mrca_node.leaf_iter()]
mrca_leaf_taxa = [leaf_nodes.label for leaf_nodes in mrca_leafs]
mrca_tree = dendropy.Tree(working_tree)
mrca_tree.retain_taxa(mrca_leafs)
mrca_newick = mrca_tree.as_string(schema="newick", suppress_rooting=True).strip().replace(';','')
if set(node_genes) == set(mrca_leaf_taxa):
label_tree = original_tree.as_string(schema="newick", suppress_rooting=True)
label_tree = label_tree.replace("'",'').replace(mrca_newick.replace("'",''), mrca_newick.replace("'",'') + "'#1'")
tree_file = open(tree_file_name, 'w')
tree_file.write(label_tree)
tree_file.close()
codeml_trees.append(tree_file_name)
else:
print 'Unable to label: {0}. Additional genes present'.format(node_data)
error_log.write('Unable to label: {0}. Additional genes present\n'.format(node_data))
(codeml_dir, codeml_file, codeml_output) = check_output(alignment_input, 'Codeml_Setup')
main_output_dir = codeml_dir
codeml_wrapper_check, codeml_wrapper_bin = False, False
if alignment_input.input_in_dir:
setup_list = ['GenerateCodemlWorkspace.pl',alignment_input.current_input] + codeml_trees + [codeml_dir]
try:
codeml_wrapper_call = subprocess.Popen(setup_list, stdout=subprocess.PIPE,stderr=subprocess.PIPE, stdin=subprocess.PIPE)
codeml_wrapper_check = True
except:
codeml_wrapper_bin = True
if codeml_wrapper_bin:
setup_list = ['perl'] + setup_list
try:
codeml_wrapper_call = subprocess.Popen(setup_list, stdout=subprocess.PIPE,stderr=subprocess.PIPE, stdin=subprocess.PIPE)
codeml_wrapper_check = True
except:
pass
if codeml_wrapper_check:
wrapper_out, wrapper_error = codeml_wrapper_call.communicate()
if not wrapper_error:
for path, sub_dirs, file_list in os.walk(codeml_dir):
if 'Omega' in path:
taskfarm_log.write('cd {0}; codeml\n'.format(path))
else:
if 'number of sequences' in wrapper_error:
error_string = wrapper_error.strip().split(': ')[1].split(',')[0]
error_string = error_string[0].upper() + error_string[1:]
print error_string
error_log.write('Error running GenerateCodemlWorkspace.pl.\n')
error_log.write('Error Reported: {0}.\n'.format(error_string))
else:
print 'Error running GenerateCodemlWorkspace.pl. Please check log file for details.'
error_log.write('Error running GenerateCodemlWorkspace.pl.\n')
error_log.write('{0}.\n'.format(wrapper_error))
else:
print 'Error running GenerateCodemlWorkspace.pl, please confirm that the script and all modules are installed.'
error_log.write('Error running GenerateCodemlWorkspace.pl, please confirm that the script and all modules are installed.\n')
else:
setup_list = ['GenerateCodemlWorkspace.pl',alignment_input.current_input] + codeml_trees + [codeml_dir]
try:
codeml_wrapper_call = subprocess.Popen(setup_list, stdout=subprocess.PIPE,stderr=subprocess.PIPE, stdin=subprocess.PIPE)
codeml_wrapper_check = True
except:
codeml_wrapper_bin = True
if codeml_wrapper_bin:
setup_list = ['perl'] + setup_list
try:
codeml_wrapper_call = subprocess.Popen(setup_list, stdout=subprocess.PIPE,stderr=subprocess.PIPE, stdin=subprocess.PIPE)
codeml_wrapper_check = True
except:
pass
if codeml_wrapper_check:
wrapper_out, wrapper_error = codeml_wrapper_call.communicate()
if not wrapper_error:
for path, sub_dirs, file_list in os.walk(codeml_dir):
if 'Omega' in path:
taskfarm_log.write('cd {0}; codeml\n'.format(path))
else:
if 'number of sequences' in wrapper_error:
error_string = wrapper_error.strip().split(': ')[1].split(',')[0]
error_string = error_string[0].upper() + error_string[1:]
print error_string
error_log.write('Error running GenerateCodemlWorkspace.pl.\n')
error_log.write('Error Reported: {0}.\n'.format(error_string))
else:
print 'Error running GenerateCodemlWorkspace.pl. Please check log file for details.'
error_log.write('Error running GenerateCodemlWorkspace.pl.\n')
error_log.write('{0}.\n'.format(wrapper_error))
else:
print 'Error running GenerateCodemlWorkspace.pl, please confirm that the script and all modules are installed.'
error_log.write('Error running GenerateCodemlWorkspace.pl, please confirm that the script and all modules are installed.\n')
else:
with open(alignment_input.current_input) as test_data:
if test_data.readline().startswith('>'):
						print 'Unaligned sequences detected in: {0}'.format(return_filename(alignment_input.current_input))
						error_log.write('Unaligned sequences detected in: {0}\n'.format(return_filename(alignment_input.current_input)))
if not alignment_found_check:
print 'No alignment(s) specified by input option, please verify that each tree file is accompanied by an alignment'
if not tree_found_check:
		print 'No tree file(s) specified by input option, please verify that each alignment is accompanied by a tree file'
taskfarm_log.close()
error_log.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_subtrees:
### Details: Creates subtrees from newick tree data
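### Note: trees are edited interactively (subtree selection, node removal, leaf removal or keep
### original) and optional outgroups can be added; choices are recorded in the subtree log file so
### completed trees are skipped on rerun, and a sequence file per edited tree is extracted from the
### sequence database.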
def vespa_subtrees(input_files):
print 'VESPA: Create SubTrees'
import os, sys, dendropy, re
from collections import defaultdict
class nodes_picker_data(object):
def __init__(self, current_tree, log_data):
self.job_name = current_tree.strip()
self.requested_command = log_data[0]
self.original_tree = log_data[1]
self.labeled_tree = ''
self.edited_tree = log_data[2]
self.outgroup_request = True
self.outgroups = []
def __str__(self):
			return '#' + self.job_name + '\nEdit_Method:' + self.requested_command + '\nOriginal_Tree:' + self.original_tree + '\nEdited_Tree:' + self.edited_tree + '\nOutgroup(s):' + ', '.join(self.outgroups) + '\n'
def np_log_reader(np_log_file):
from itertools import groupby
return_trees = []
tree_groups = (entry[1] for entry in groupby(open(np_log_file, 'rU'), lambda line: line.startswith('#')))
for current_tree in tree_groups:
return_trees.append(nodes_picker_data(current_tree.next().strip()[1:], [tree_lines.strip().split(':')[1] for tree_lines in tree_groups.next()]))
return return_trees
def select_subtree(tree_to_modify):
unedited_tree = dendropy.Tree.get_from_string(tree_to_modify.labeled_tree,"newick")
while not tree_to_modify.edited_tree:
user_selected_node = raw_input('Please select a node for subtree creation: ')
if user_selected_node in [internal_nodes.label for internal_nodes in unedited_tree.internal_nodes()]:
user_confirm = raw_input('Node ' + user_selected_node + ' found. Please confirm (y / n): ')
if user_confirm.lower().startswith('y'):
edited_node = unedited_tree.find_node_with_label(user_selected_node)
edited_leafs = [leaf_nodes.taxon for leaf_nodes in edited_node.leaf_iter()]
edited_leaf_taxa = [leaf_nodes.label for leaf_nodes in edited_leafs]
edited_tree = dendropy.Tree(unedited_tree)
edited_tree.retain_taxa(edited_leafs)
edited_tree_string = edited_tree.as_string(schema="newick", suppress_rooting=True).strip()
for label_match in [match.group() for match in re.finditer('\)\d+',edited_tree_string)]:
edited_tree_string = edited_tree_string.replace(label_match, ')')
tree_to_modify.edited_tree = edited_tree_string.strip()
def remove_node(tree_to_modify):
edited_tree = dendropy.Tree.get_from_string(tree_to_modify.labeled_tree,"newick")
while not tree_to_modify.edited_tree:
user_selected_node = raw_input('Please select a node for removal: ')
if user_selected_node in [internal_nodes.label for internal_nodes in edited_tree.internal_nodes()]:
user_confirm = raw_input('Node ' + user_selected_node + ' found. Please confirm (y / n): ')
if user_confirm.lower().startswith('y'):
edited_node = edited_tree.find_node_with_label(user_selected_node)
edited_tree.prune_subtree(edited_node)
edited_tree_string = edited_tree.as_string(schema="newick", suppress_rooting=True).strip()
for label_match in [match.group() for match in re.finditer('\)\d+',edited_tree_string)]:
edited_tree_string = edited_tree_string.replace(label_match, ')')
tree_to_modify.edited_tree = edited_tree_string.strip()
def remove_leaf(tree_to_modify):
edited_tree = dendropy.Tree.get_from_string(tree_to_modify.labeled_tree,"newick")
leaf_list = [str(taxa_ids).replace("'",'') for taxa_ids in edited_tree.taxon_namespace]
while not tree_to_modify.edited_tree:
			user_selected_leafs = raw_input('Please select a leaf (taxa) for removal (if multiple, separate with commas): ')
user_leafs = [current_leaf.strip() for current_leaf in user_selected_leafs.split(',')]
if len(user_leafs) == len(list(set(user_leafs) & set(leaf_list))):
user_confirm = raw_input('Leaf(s) ' + ', '.join(user_leafs) + ' found. Please confirm (y / n): ')
if user_confirm.lower().startswith('y'):
remove_list = [edited_tree.find_node_with_taxon_label(current_taxa).taxon for current_taxa in leaf_list if current_taxa in user_leafs]
edited_tree.prune_taxa(remove_list)
edited_tree_string = edited_tree.as_string(schema="newick").replace("'",'')
for label_match in [match.group() for match in re.finditer('\)\d+',edited_tree_string)]:
edited_tree_string = edited_tree_string.replace(label_match, ')')
tree_to_modify.edited_tree = edited_tree_string.strip()
def current_tree_user_request(requesting_job):
def tree_labeler(unlabeled_tree):
labeled_tree = list(unlabeled_tree.original_tree)
for pos, matched_nodes in enumerate([match.end() for match in re.finditer('\)', unlabeled_tree.original_tree)][::-1]):
labeled_tree.insert(matched_nodes,str(pos))
unlabeled_tree.labeled_tree = ''.join(labeled_tree)
def command_reqeust(command_job):
command_dict = {'1':'Subtree Selection', '2':'Node Removal', '3':'Leaf (Taxa) Removal', '4':'Keep Original'}
while not command_job.requested_command:
user_request = raw_input('Please select an action ( 1 / 2 / 3 / 4 ): ')
if user_request in ['1', '2', '3', '4']:
user_confirm = raw_input(command_dict[user_request] + '. Please confirm (y / n): ')
if user_confirm.lower().startswith('y'):
command_job.requested_command = command_dict[user_request]
if command_job.requested_command == 'Subtree Selection':
select_subtree(command_job)
command_job.job_status = 'Finished'
elif command_job.requested_command == 'Node Removal':
remove_node(command_job)
command_job.job_status = 'Finished'
elif command_job.requested_command == 'Leaf (Taxa) Removal':
remove_leaf(command_job)
command_job.job_status = 'Finished'
elif command_job.requested_command == 'Keep Original':
command_job.job_status = 'Finished'
command_job.edited_tree = command_job.original_tree
def outgroup_reqeust(command_job):
taxa_tree = dendropy.Tree.get_from_string(command_job.original_tree,"newick")
taxa_list = [str(taxa_ids).replace("'",'') for taxa_ids in taxa_tree.taxon_namespace]
while command_job.outgroup_request and not command_job.outgroups:
verify_outgroups = raw_input('Additional outgroup(s) required? (y / n): ')
if 'y' in verify_outgroups.lower():
while not command_job.outgroups:
						selected_outgroups = raw_input('Please indicate outgroup(s) to add (if multiple, separate with commas): ')
outgroup_list = [current_outgroup.strip() for current_outgroup in selected_outgroups.split(',')]
user_confirm = raw_input('Following outgroup(s) selected: ' + ', '.join(outgroup_list) + '. Please confirm (y / n): ')
if user_confirm.lower().startswith('y'):
if len(outgroup_list) == len(list(set(outgroup_list) & set(taxa_list))):
command_job.outgroups = outgroup_list
else:
print 'Unknown outgroup detected'
else:
command_job.outgroup_request = False
tree_labeler(requesting_job)
screen_tree = dendropy.Tree.get_from_string(requesting_job.labeled_tree,"newick")
screen_tree.print_plot(show_internal_node_labels=True)
print 'Current Tree: ' + requesting_job.job_name + '\n'
print 'Possible actions\n____________________\n1. Subtree Selection\n2. Node Removal\n3. Leaf (Taxa) Removal\n4. Keep Original\n'
command_reqeust(requesting_job)
if requesting_job.requested_command != 'Keep Original':
outgroup_reqeust(requesting_job)
global bme_sequence_database_location, bme_output_directory
if not bme_sequence_database_location:
print 'Sequence database not found'
sys.exit()
check_output_dir('Subtrees')
current_log_data = []
if os.path.isfile(bme_subtree_log_file):
current_log_data = np_log_reader(bme_subtree_log_file)
completed_jobs = [completed_jobs.job_name for completed_jobs in current_log_data]
updated_log = open(bme_subtree_log_file,'a')
for tree_files in input_files:
if tree_files.current_input not in completed_jobs:
newick_tree = dendropy.Tree.get_from_path(tree_files.current_input,"newick").as_string(schema="newick", suppress_rooting=True).strip()
current_tree = nodes_picker_data(tree_files.current_input,['', newick_tree, ''])
current_tree_user_request(current_tree)
current_log_data.append(current_tree)
updated_log.write(str(current_tree))
updated_log.close()
else:
new_log = open(bme_subtree_log_file,'a')
for tree_files in input_files:
newick_tree = dendropy.Tree.get_from_path(tree_files.current_input,"newick").as_string(schema="newick", suppress_rooting=True).strip()
current_tree = nodes_picker_data(tree_files.current_input, ['', newick_tree, ''])
current_tree_user_request(current_tree)
current_log_data.append(current_tree)
new_log.write(str(current_tree))
new_log.close()
sequence_table = defaultdict(list)
for tree_data in current_log_data:
sequence_filename = '{0}/{1}.{2}'.format(bme_output_directory, return_filename_wo_ext(tree_data.job_name), return_extension(bme_sequence_database_location))
log_tree = dendropy.Tree.get_from_string(tree_data.edited_tree,"newick")
for taxa in [str(taxa_ids).replace("'",'') for taxa_ids in log_tree.taxon_namespace]:
sequence_table[taxa.strip().replace('#1','')].append(sequence_filename)
if tree_data.outgroups:
for current_outgroup in tree_data.outgroups:
sequence_table[current_outgroup.strip().replace('#1','')].append(sequence_filename)
try:
with open(sequence_filename): os.remove(sequence_filename)
except IOError:
pass
for working_sequence in sequence_reader(bme_sequence_database_location).read():
for seqeunce_keys in sequence_table.keys():
if seqeunce_keys in working_sequence.header:
for current_sequence_file in sequence_table[seqeunce_keys]:
sequence_output = open(current_sequence_file, 'a')
sequence_output.write(str(working_sequence) + '\n')
sequence_output.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ____ _ _____ _
### |___ \ | | | __ \| |
### __) |_ __ __| | | |__) | |__ __ _ ___ ___
### |__ <| '__/ _` | | ___/| '_ \ / _` / __|/ _ \
### ___) | | | (_| | | | | | | | (_| \__ \ __/
### |____/|_| \__,_| |_| |_| |_|\__,_|___/\___|
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_metAl_compare:
### Details: Compares two sets of alignments using metAl; norMD scores are used to choose an alignment when the metAl distance exceeds the cutoff
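### Note: alignments with the same filename in the input and in the comparison set
### (bme_metAl_compare_files) are scored pairwise with metAl; when the metAl distance is below the
### cutoff the first alignment is kept, otherwise both are scored with norMD and the higher-scoring
### alignment is copied to the output. All scores are logged to 'metAl_compare.csv'.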
def vespa_metAl_compare (input_files):
print 'VESPA: metAl compare'
from collections import defaultdict
from subprocess import Popen, PIPE
import sys, shutil
global bme_metAl_compare_files, bme_metAl_compare_dir
def check_noRMD(location_list):
noRMD_values = {}
for pos, alingment_files in enumerate(location_list):
noRMD_program_check, noRMD_program_bin = False, False
try:
noRMD_program_call = Popen(['normd', alingment_files], stdout=PIPE, stderr=PIPE)
noRMD_program_check = True
except:
noRMD_program_bin = True
if noRMD_program_bin:
try:
noRMD_program_call = Popen(['./normd', alingment_files], stdout=PIPE, stderr=PIPE)
noRMD_program_check = True
except:
pass
if noRMD_program_check:
noRMD_output, noRMD_error = noRMD_program_call.communicate()
if not noRMD_error:
noRMD_values[location_list[pos]] = float(noRMD_output.strip())
else:
print 'Error detected with noRMD. Please confirm the program is correctly compiled'
sys.exit(0)
else:
print 'Error running noRMD. Please confirm the program is installed'
sys.exit(0)
return noRMD_values
def scoreMetAl (sent_compare_data, sent_output_dir):
global bme_metAl_cutoff
for alignment_ID, alignment_locations in sent_compare_data.items():
if len(alignment_locations) == 2:
metal_program_check, metal_program_bin = False, False
try:
metAl_program_call = Popen(['metal', alignment_locations[0], alignment_locations[1]], stdout=PIPE, stderr=PIPE)
metal_program_check = True
except:
metal_program_bin = True
if metal_program_bin:
try:
metAl_program_call = Popen(['./metal', alignment_locations[0], alignment_locations[1]], stdout=PIPE, stderr=PIPE)
metal_program_check = True
except:
pass
if metal_program_check:
#metAl_program_call = Popen(['metal', alignment_locations[0], alignment_locations[1]], stdout=PIPE, stderr=PIPE)
metAl_output, metAl_error = metAl_program_call.communicate()
if not metAl_error:
split_metAl = metAl_output.strip().split('= ')
if float(split_metAl[1]) < bme_metAl_cutoff:
metal_compare_results.write('{0},{1},{2},{3},{4},{5}\n'.format(alignment_ID, split_metAl[0].strip(), split_metAl[1], 'null', 'null', alignment_locations[0]))
shutil.copy(alignment_locations[0], sent_output_dir)
else:
returned_values_dict = check_noRMD(alignment_locations)
alignment_compare = returned_values_dict.keys()
if returned_values_dict[alignment_compare[0]] == returned_values_dict[alignment_compare[1]]:
metal_compare_results.write('{0},{1},{2},{3},{4},{5}\n'.format(alignment_ID, split_metAl[0].strip(), split_metAl[1], returned_values_dict[alignment_compare[0]], returned_values_dict[alignment_compare[1]], alignment_locations[0]))
shutil.copy(alignment_locations[0], sent_output_dir)
elif returned_values_dict[alignment_compare[0]] > returned_values_dict[alignment_compare[1]]:
if alignment_compare[0] == alignment_locations[0]:
metal_compare_results.write('{0},{1},{2},{3},{4},{5}\n'.format(alignment_ID, split_metAl[0].strip(), split_metAl[1], returned_values_dict[alignment_compare[0]], returned_values_dict[alignment_compare[1]], alignment_compare[0]))
if alignment_compare[1] == alignment_locations[0]:
metal_compare_results.write('{0},{1},{2},{3},{4},{5}\n'.format(alignment_ID, split_metAl[0].strip(), split_metAl[1], returned_values_dict[alignment_compare[1]], returned_values_dict[alignment_compare[0]], alignment_compare[0]))
shutil.copy(alignment_compare[0], sent_output_dir)
elif returned_values_dict[alignment_compare[0]] < returned_values_dict[alignment_compare[1]]:
if alignment_compare[0] == alignment_locations[0]:
metal_compare_results.write('{0},{1},{2},{3},{4},{5}\n'.format(alignment_ID, split_metAl[0].strip(), split_metAl[1], returned_values_dict[alignment_compare[0]], returned_values_dict[alignment_compare[1]], alignment_compare[1]))
if alignment_compare[1] == alignment_locations[0]:
metal_compare_results.write('{0},{1},{2},{3},{4},{5}\n'.format(alignment_ID, split_metAl[0].strip(), split_metAl[1], returned_values_dict[alignment_compare[1]], returned_values_dict[alignment_compare[0]], alignment_compare[1]))
shutil.copy(alignment_compare[1], sent_output_dir)
else:
print 'Error detected with metAl. Please confirm the program is correctly compiled'
sys.exit(0)
else:
print 'Error running metAl. Please confirm the program is installed'
sys.exit(0)
else:
print 'Cannot find comparison alignment for: {0}'.format(alignment_ID)
metal_compare_results = create_unique_file('metAl_compare.csv')
metal_compare_results.write('{0},{1},{2},{3},{4},{5}\n'.format('Alignment_ID', 'metAL_d_pos', 'metAL_score', 'noRMD_input', 'noRMD_compare', 'Selected_Alignment'))
metAl_output_dir, metAl_output_filename, metAl_output = '', '', ''
compare_dict = defaultdict(list)
for sequence_input in input_files:
if verify_alignment(sequence_input.current_input):
(metAl_output_dir, metAl_output_filename, metAl_output) = check_output(sequence_input, 'metAl_compare')
compare_dict[return_filename_wo_ext(sequence_input.current_input)].append(sequence_input.current_input)
else:
print return_filename(sequence_input.current_input) + ': Not an alignment file'
for sequence_compare_input in bme_metAl_compare_files:
if verify_alignment(sequence_compare_input):
compare_dict[return_filename_wo_ext(sequence_compare_input)].append(sequence_compare_input)
else:
print return_filename(sequence_compare_input) + ': Not an alignment file'
scoreMetAl(compare_dict, metAl_output_dir)
metal_compare_results.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_setup_prottest:
### Details: Creates the input files needed for a ProtTest run
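### Note: each alignment is copied to a 'ProtTest_Setup' directory and a matching
### 'java -jar prottest.jar ...' command is appended to a taskfarm file for batch execution.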
def vespa_setup_prottest(input_files):
print 'VESPA: ProtTest Setup'
import shutil
prottest_file = create_unique_file('setup_prottest_taskfarm')
for sequence_input in input_files:
if verify_alignment(sequence_input.current_input):
(prottest_output_dir, prottest_output_filename, prottest_output) = check_output(sequence_input, 'ProtTest_Setup')
shutil.copy(sequence_input.current_input, prottest_output_dir)
prottest_file.write('java -jar prottest.jar -i ' + prottest_output + ' -o ' + prottest_output_dir + '/' + remove_extension(prottest_output_filename) + '.models -all-distributions\n')
else:
print return_filename(sequence_input) + ': Not an alignment file'
prottest_file.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_prottest_reader:
### Details: Reads ProtTest output and reports both the best-fitting model and the best model supported by MrBayes
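### Note: two summary files are written, 'prottest_reader.best_models' and
### 'prottest_reader.best_supported_models'; the second is restricted to the matrices available in
### MrBayes (Dayhoff, JTT, Blosum62, VT, WAG).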
def vespa_prottest_reader (input_files):
print 'VESPA: ProtTest Results Reader'
def protest_verify(protest_output):
verify_model = False
alignment_file = ''
with open(protest_output) as model_file:
supported_by_mrbayes = ['Dayhoff', 'JTT', 'Blosum62', 'VT', 'WAG']
for check_model_file in [model_file.next() for x in xrange(3)]:
if 'ProtTest' in check_model_file:
verify_model = True
break
if verify_model:
for model_lines in model_file:
if 'Alignment file' in model_lines:
alignment_file = return_filename(model_lines.split(':')[-1].strip())
return alignment_file
def protest_output_reader(protest_output):
return_best_model, return_best_supported_model = ('', '')
with open(protest_output) as model_file:
supported_by_mrbayes = ['Dayhoff', 'JTT', 'Blosum62', 'VT', 'WAG']
data_block_test, end_of_block = (False, False)
for model_lines in model_file:
if data_block_test and ('-' * 75) == model_lines.strip():
end_of_block = True
elif data_block_test and not end_of_block:
split_model = model_lines.strip().split()
if not return_best_model:
return_best_model = split_model[0].split('+')
if split_model[0].split('+')[0] in supported_by_mrbayes and not return_best_supported_model:
return_best_supported_model = split_model[0].split('+')
elif ('-' * 75) == model_lines.strip():
data_block_test = True
return return_best_model, return_best_supported_model
report_best_models = create_unique_file('prottest_reader.best_models')
report_best_supported_models = create_unique_file('prottest_reader.best_supported_models')
for model_input in input_files:
best_model, best_supported_model = ('', '')
alignment_input = protest_verify(model_input.current_input)
if alignment_input:
best_model, best_supported_model = protest_output_reader(model_input.current_input)
report_best_models.write(alignment_input + ',' + '+'.join(best_model) + '\n')
report_best_supported_models.write(alignment_input + ',' + '+'.join(best_supported_model) + '\n')
report_best_models.close()
report_best_supported_models.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_setup_mrbayes:
### Details: Creates NEXUS input files for MrBayes from FASTA alignments and a ProtTest supported-model table
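### Note: each alignment is converted to an interleaved NEXUS matrix and a mrbayes command block is
### appended using the model from the supported-model table (option -model_table); sequence headers
### longer than 22 characters are truncated to fit the NEXUS layout.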
def vespa_setup_mrbayes (input_files):
print 'VESPA: MrBayes Setup'
global bme_supported_model_list, bme_mrbayes_mcmc_gen, bme_mrbayes_mcmc_chains, bme_mrbayes_mcmc_temp, bme_mrbayes_mcmc_burnin
import os, sys
header_warning = False
def format_nexus (alignment_list, nexus_filename):
sequence_for_info = alignment_list[0]
sequence_for_info.seq_type()
sequence_length = len(sequence_for_info)
nexus_file = open(nexus_filename, 'w')
nexus_file.write('\n'.join(['#NEXUS', '', 'BEGIN DATA;', 'DIMENSIONS NTAX=' + str(len(alignment_list)) + ' NCHAR=' +
str(sequence_length) + ';', 'FORMAT DATATYPE=' + sequence_for_info.type + ' MISSING=- INTERLEAVE;', '', 'MATRIX']) + '\n')
current_sequence_position = 0
while current_sequence_position < sequence_length:
for alignment_seqeunce in alignment_list:
alignment_header = alignment_seqeunce.header.strip()[1:]
nexus_file.write(alignment_header + (' ' * (22 - len(alignment_header))) + ' '.join([alignment_seqeunce.sequence[seqeunce_block:seqeunce_block + 20] for seqeunce_block in range(current_sequence_position, current_sequence_position + 100, 20)]) + '\n')
current_sequence_position += 100
nexus_file.write('\n')
nexus_file.write('\n'.join([';', 'END;\n']))
nexus_file.close()
    def convert_for_mrbayes (original_model):
        if '+' in original_model:
            command_list = original_model.split('+')
        else:
            command_list = [original_model, '']
convert_model = {'Dayhoff':'dayhoff', 'JTT':'jones', 'Blosum62':'blosum', 'VT':'vt','WAG':'wag'}
convert_options = {'I':'propinv', 'G':'gamma', 'IG':'invgamma', '':'equal'}
return convert_model[command_list[0]], convert_options[''.join(command_list[1:])]
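    # Illustrative mapping performed by convert_for_mrbayes (not exhaustive):
    #   'JTT+I+G' -> ('jones', 'invgamma'),  'WAG' -> ('wag', 'equal')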
if bme_supported_model_list:
supported_model_dict = {}
with open(bme_supported_model_list) as model_data:
for model_lines in model_data:
model_split = model_lines.strip().split(',')
supported_model_dict[model_split[0]] = model_split[1]
for sequence_input in input_files:
if verify_alignment(sequence_input.current_input):
if supported_model_dict.has_key(return_filename(sequence_input.current_input)):
mrbayes_lines = ['\nbegin mrbayes;', 'log start filename=Logs/' + return_filename_wo_ext(sequence_input.current_input) + '.log replace;', 'set autoclose=yes;']
(mrbayes_output_dir, mrbayes_output_filename, mrbayes_output) = check_output(sequence_input, 'MrBayes_Setup')
model_input, rate_input = convert_for_mrbayes(supported_model_dict[return_filename(sequence_input.current_input)])
mrbayes_lines.extend(['lset applyto=(all) rates=' + rate_input + ';', 'prset aamodelpr=fixed(' + model_input + ');',
'mcmcp ngen={0} printfreq=2000 samplefreq=200 nchains={1} temp={2} savebrlens=yes relburnin=yes burninfrac={3};'.format(bme_mrbayes_mcmc_gen, bme_mrbayes_mcmc_chains, bme_mrbayes_mcmc_temp, bme_mrbayes_mcmc_burnin),
'mcmc;', 'sumt;', 'sump;', 'log stop;', 'end;'])
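                    # With the default globals defined further below, the mcmcp line expands to:
                    #   mcmcp ngen=200000 printfreq=2000 samplefreq=200 nchains=4 temp=0.2
                    #         savebrlens=yes relburnin=yes burninfrac=0.25;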
nexus_sequence_input = []
for working_sequence in sequence_reader(sequence_input.current_input).read():
if len(working_sequence.header) > 22:
if not header_warning:
header_warning = True
print 'Warning: Sequence headers too long for NEXUS format - Editing headers for length (Manual editing beforehand is recommended)'
working_sequence.header = '{0}\n'.format(working_sequence.header[:22])
nexus_sequence_input.append(working_sequence)
                    converted_filename = '{0}.nex'.format(remove_extension(mrbayes_output))
                    format_nexus(nexus_sequence_input, converted_filename)
                    append_mrbayes_block = open(converted_filename, 'a')
append_mrbayes_block.write('\n'.join(mrbayes_lines))
append_mrbayes_block.close()
else:
        print 'No ProtTest model table specified. Please specify using -model_list='
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ___ _ _____ _
### |__ \ | | | __ \| |
### ) |_ __ __| | | |__) | |__ __ _ ___ ___
### / /| '_ \ / _` | | ___/| '_ \ / _` / __|/ _ \
### / /_| | | | (_| | | | | | | | (_| \__ \ __/
### |____|_| |_|\__,_| |_| |_| |_|\__,_|___/\___|
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Functions: vespa_setup_reciprocal_input
### Details: creates an input database for a reciprocal run
def vespa_setup_reciprocal_input(input_files):
global bme_sequence_database_location
csv_list = []
for similarity_file in input_files:
with open(similarity_file.current_input) as similarity_data:
for similarity_lines in similarity_data:
if similarity_lines.strip().split()[1] not in csv_list:
csv_list.append(similarity_lines.strip().split()[1])
reciprocal_output = open(create_unique_file('Reciprocal_Input.' + bme_sequence_database_location.split('.')[-1]), 'w')
for working_sequence in sequence_reader(bme_sequence_database_location).read():
for csv_entries in csv_list:
if csv_entries in working_sequence.header:
reciprocal_output.write(str(working_sequence) + '\n')
reciprocal_output.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Functions: create_similarity_groups
### Details: Builds connected components from the similarity edges and writes one sequence file per group
def create_similarity_groups(graph_data):
def merge_groups(merge_data):
merge_check = True
while merge_check:
merge_check = False
current_mergers = []
while merge_data:
query, subject_list = merge_data[0], merge_data[1:]
merge_data = []
for subject in subject_list:
if subject.isdisjoint(query):
merge_data.append(subject)
else:
merge_check = True
query.update(subject)
current_mergers.append(query)
merge_data = current_mergers
return merge_data
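    # merge_groups repeatedly unions any overlapping sets until a full pass produces no merge,
    # i.e. it collapses the edge sets into connected components
    # (e.g. {A,B}, {B,C}, {D,E} -> {A,B,C}, {D,E}).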
import os
from collections import defaultdict
global bme_output_directory, bme_sequence_database_location
sequence_table = defaultdict(str)
check_output_dir('Similarity_Groups')
merged_graph_data = merge_groups(graph_data)
total_number_of_files = len(str(len(merged_graph_data)))
for file_counter, sequence_list in enumerate(merged_graph_data):
similarity_filename = bme_output_directory + '/similarity_group_' + ('0' * (total_number_of_files - len(str(file_counter)))) + str(file_counter) + '.fasta'
for sequences in sequence_list:
sequence_table[sequences] = similarity_filename
try:
with open(similarity_filename): os.remove(similarity_filename)
except IOError:
pass
for working_sequence in sequence_reader(bme_sequence_database_location).read():
if sequence_table.has_key(working_sequence.header[1:].strip()):
sequence_output = open(sequence_table.pop(working_sequence.header[1:].strip()), 'a')
sequence_output.write(str(working_sequence) + '\n')
sequence_output.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Functions: assign_connections
### Details: Checks similarity hits against the configured thresholds and assigns edges in the graph
def assign_connections (assignment_graph, assign_data):
global bme_similarity_e_value_cutoff, bme_similarity_alignment_length_cutoff, bme_similarity_percent_identity_cutoff, bme_similarity_data_format
global blast_alignment_length_warn, hmmer_percent_identity_warn
pass_thresholds = True
assign_e_value, assign_percent_identity, assign_alignment_length = (False, False, False)
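    # Column indices below assume NCBI BLAST tabular output (-outfmt 6: field 2 = % identity,
    # field 10 = e-value). For the 'hmmer' format, field 6 is read as the e-value and the ratio of
    # fields 2 and 5 approximates alignment coverage; which HMMER table this matches is an
    # assumption of this pipeline rather than a general HMMER guarantee.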
if bme_similarity_data_format == 'blast':
assign_e_value, assign_percent_identity = (float(assign_data[10]), float(assign_data[2]))
if bme_similarity_data_format == 'hmmer':
assign_e_value = float(assign_data[6])
assign_alignment_length = float(assign_data[2]) / float(assign_data[5])
if bme_similarity_e_value_cutoff:
if float(bme_similarity_e_value_cutoff) < assign_e_value:
pass_thresholds = False
if bme_similarity_percent_identity_cutoff:
if bme_similarity_data_format == 'hmmer':
if not hmmer_percent_identity_warn:
hmmer_percent_identity_warn = True
                print 'Percent identity: HMMER output does not include percent identity - option ignored'
else:
if float(bme_similarity_percent_identity_cutoff) > assign_percent_identity:
pass_thresholds = False
if bme_similarity_alignment_length_cutoff:
if assign_alignment_length:
if float(bme_similarity_alignment_length_cutoff) > assign_alignment_length:
pass_thresholds = False
else:
if not blast_alignment_length_warn:
blast_alignment_length_warn = True
print 'Alignment length: cannot compute from BLAST output alone'
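    # Reciprocity bookkeeping: a passing hit is stored as (query, subject) -> False; when the
    # reversed orientation (subject, query) is later seen and also passes, the existing entry is
    # flipped to True, so True marks edges supported in both directions.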
if pass_thresholds:
if assignment_graph.has_key((assign_data[1],assign_data[0])):
assignment_graph[(assign_data[1],assign_data[0])] = True
else:
assignment_graph[(assign_data[0],assign_data[1])] = False
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Functions: vespa_best_reciprocal_similarity_groups
### Details: Identifies best reciprocals between species
def vespa_best_reciprocal_similarity_groups(input_files):
def format_splitter(unsplit_similarity_line, similarity_format):
return_line = []
if similarity_format == 'blast':
return_line = unsplit_similarity_line.strip().split('\t')
if similarity_format == 'hmmer':
return_line = unsplit_similarity_line.strip().split()
return return_line
    def return_compare_data(uncompared_line, similarity_format):
        return_query_sequence, return_query_species = '', ''
        return_subject_sequence, return_subject_species = '', ''
        return_e_value = 1.0
        if similarity_format == 'blast':
            return_query_sequence, return_subject_sequence = uncompared_line[0], uncompared_line[1]
            return_query_species, return_subject_species = return_query_sequence.split('|')[0], return_subject_sequence.split('|')[0]
            return_e_value = float(uncompared_line[10])
        if similarity_format == 'hmmer':
            pass
        return return_query_sequence, return_query_species, return_subject_sequence, return_subject_species, return_e_value
from collections import defaultdict
global bme_similarity_data_format
print 'VESPA: Best-Reciprocal Groups'
reciprocality_table = defaultdict(dict)
reciprocality_graph = {}
for similarity_file in input_files:
with open(similarity_file.current_input) as similarity_data:
for similarity_lines in similarity_data:
split_lines = format_splitter(similarity_lines, bme_similarity_data_format)
query_sequence, query_species, subject_sequence, subject_species, e_value = return_compare_data(split_lines, bme_similarity_data_format)
if query_species != subject_species:
if reciprocality_table[query_sequence].has_key(subject_species):
if e_value < reciprocality_table[query_sequence][subject_species][1]:
reciprocality_table[query_sequence][subject_species] = [subject_sequence, e_value]
else:
reciprocality_table[query_sequence][subject_species] = [subject_sequence, e_value]
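    # Second pass: only hits where the subject is the query's best hit in that subject species are
    # passed to assign_connections, so an edge ends up True only when the two sequences are each
    # other's best cross-species hit.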
for similarity_file in input_files:
with open(similarity_file.current_input) as similarity_data:
for similarity_lines in similarity_data:
split_lines = format_splitter(similarity_lines, bme_similarity_data_format)
query_sequence, query_species, subject_sequence, subject_species, e_value = return_compare_data(split_lines, bme_similarity_data_format)
if reciprocality_table[query_sequence].has_key(subject_species):
if reciprocality_table[query_sequence][subject_species][0] == subject_sequence:
assign_connections(reciprocality_graph, split_lines)
sub_graph = []
for connection, reciprocality_confirmation in reciprocality_graph.items():
if reciprocality_confirmation:
sub_graph.append(set(connection))
reciprocality_graph.clear()
create_similarity_groups(sub_graph)
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Functions: vespa_similarity_groups
### Details: Identifies either simple or reciprocal connections within file
def vespa_similarity_groups(input_files, reciprocality_check):
def format_splitter(unsplit_similarity_line, similarity_format):
return_line = []
if similarity_format == 'blast':
return_line = unsplit_similarity_line.strip().split('\t')
if similarity_format == 'hmmer':
return_line = unsplit_similarity_line.strip().split()
return return_line
def test_selfhit(check_line, similarity_format):
return_check = True
if similarity_format == 'blast':
if check_line[0] == check_line[1]:
return_check = False
if similarity_format == 'hmmer':
pass
return return_check
if reciprocality_check:
print 'VESPA: Reciprocal Groups'
else:
print 'VESPA: Similarity Groups'
global bme_similarity_data_format
reciprocality_graph = {}
for similarity_file in input_files:
with open(similarity_file.current_input) as similarity_data:
for similarity_lines in similarity_data:
split_lines = format_splitter(similarity_lines, bme_similarity_data_format)
if test_selfhit(split_lines, bme_similarity_data_format):
assign_connections(reciprocality_graph, split_lines)
if not reciprocality_check:
graph = []
for connection in reciprocality_graph.keys():
graph.append(set(connection))
reciprocality_graph.clear()
create_similarity_groups(graph)
else:
sub_graph = []
for connection, reciprocality_confirmation in reciprocality_graph.items():
if reciprocality_confirmation:
sub_graph.append(set(connection))
reciprocality_graph.clear()
create_similarity_groups(sub_graph)
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### __ _ _____ _
### /_ | | | | __ \| |
### | |___| |_ | |__) | |__ __ _ ___ ___
### | / __| __| | ___/| '_ \ / _` / __|/ _ \
### | \__ \ |_ | | | | | | (_| \__ \ __/
### |_|___/\__| |_| |_| |_|\__,_|___/\___|
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_clean:
### Details: Removes sequences whose length is not divisible by three from the specified files
def vespa_clean (input_files):
print 'VESPA: Cleaning sequences'
removed_in_cleanfile = create_unique_file('cleaned_genes_removed.log')
global bme_remove_internal_stop, bme_label_with_filename, bme_infer_labels_ensembl
for sequence_input in input_files:
removed_in_cleanfile.write('Cleaning File: {0}\n'.format(return_filename(sequence_input.current_input)))
        (cleaned_output_dir, cleaned_output_filename, cleaned_output) = check_output(sequence_input, 'Cleaned')
        bme_clean_file = open(cleaned_output, 'w')
for working_sequence in sequence_reader(sequence_input.current_input).read():
if len(working_sequence) % 3 == 0:
remove_check = False
if bme_remove_internal_stop:
working_sequence.type = 'DNA'
if working_sequence.internal_stop():
remove_check = True
removed_in_cleanfile.write('Gene removed - Internal stop-codon found: {0}'.format(working_sequence.header))
if not remove_check:
if bme_label_with_filename:
working_sequence.header = '>{0}|{1}'.format(return_filename_wo_ext(sequence_input.current_input), working_sequence.header[1:])
if bme_infer_labels_ensembl:
species_header = ensembl_infer(working_sequence.header)
if species_header:
working_sequence.header = '>{0}|{1}'.format(species_header, working_sequence.header[1:])
bme_clean_file.write(str(working_sequence) + '\n')
else:
removed_in_cleanfile.write('Gene removed - Abnormal sequence length: {0}'.format(working_sequence.header))
bme_clean_file.close()
removed_in_cleanfile.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_clean_ensembl:
### Details: Cleans ensembl genome, returns longest transcripts divisible by three
def vespa_clean_ensembl (input_files):
    print 'VESPA: Cleaning ENSEMBL sequences'
global bme_remove_internal_stop, bme_label_with_filename, bme_infer_labels_ensembl
removed_in_cleanfile = create_unique_file('cleaned_ensembl_removed.log')
from collections import defaultdict
for sequence_input in input_files:
removed_in_cleanfile.write('Cleaning File: {0}\n'.format(return_filename(sequence_input.current_input)))
(cleaned_output_dir, cleaned_output_filename, cleaned_output) = check_output(sequence_input, 'Cleaned')
bme_clean_file = open(cleaned_output, 'w')
geneDict = defaultdict(list)
geneKeys = []
for working_sequence in sequence_reader(sequence_input.current_input).read():
geneData = working_sequence.header.strip().split('|')
if len(working_sequence) % 3 == 0:
remove_check = False
if bme_remove_internal_stop:
working_sequence.type = 'DNA'
if working_sequence.internal_stop():
remove_check = True
removed_in_cleanfile.write('Gene removed - Internal stop codon found: {0}'.format(working_sequence.header))
if not remove_check:
if geneDict.has_key(geneData[0]):
if geneDict[geneData[0]][1] < len(working_sequence):
removed_in_cleanfile.write('Gene removed - Longer transcript found: {0}'.format(geneDict[geneData[0]][0]))
geneDict[geneData[0]] = [working_sequence.header.strip(), len(working_sequence)]
else:
removed_in_cleanfile.write('Gene removed - Longer transcript found: {0}'.format(working_sequence.header))
else:
geneDict[geneData[0]] = [working_sequence.header.strip(), len(working_sequence)]
else:
removed_in_cleanfile.write('Gene removed - Abnormal sequence length: {0}'.format(working_sequence.header))
geneKeys = [longest_gene[0] for longest_gene in geneDict.values()]
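        # Second pass over the same file: only the longest in-frame transcript recorded per gene ID
        # (the field before the first '|' in the header) is written out, with optional relabelling.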
for working_sequence in sequence_reader(sequence_input.current_input).read():
if working_sequence.header.strip() in geneKeys:
if bme_label_with_filename:
working_sequence.header = '>{0}|{1}'.format(return_filename_wo_ext(sequence_input.current_input), working_sequence.header[1:])
if bme_infer_labels_ensembl:
species_header = ensembl_infer(working_sequence.header)
if species_header:
working_sequence.header = '>{0}|{1}'.format(species_header, working_sequence.header[1:])
bme_clean_file.write(str(working_sequence) + '\n')
bme_clean_file.close()
removed_in_cleanfile.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_reverse_complement:
### Details: Returns the reverse complement of each sequence in the specified files
def vespa_reverse_complement (input_files):
print 'VESPA: Reverse Complementing Sequences'
global bme_label_with_filename, bme_infer_labels_ensembl
for sequence_input in input_files:
(reversed_output_dir, reversed_output_filename, reversed_output) = check_output(sequence_input, 'RevComp')
bme_revcomp_file = open(reversed_output, 'w')
for working_sequence in sequence_reader(sequence_input.current_input).read():
if bme_label_with_filename:
working_sequence.header = '>{0}|{1}'.format(return_filename_wo_ext(sequence_input.current_input), working_sequence.header[1:])
if bme_infer_labels_ensembl:
species_header = ensembl_infer(working_sequence.header)
if species_header:
working_sequence.header = '>{0}|{1}'.format(species_header, working_sequence.header[1:])
bme_revcomp_file.write(str(working_sequence.seq_revcomp()) + '\n')
bme_revcomp_file.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_translate:
### Details: Translates sequences in the specified files from DNA to protein
def vespa_translate (input_files):
print 'VESPA: Translating Sequences'
global bme_remove_internal_stop, bme_remove_terminal_stop, bme_label_with_filename, bme_infer_labels_ensembl
clean_warn = False
removed_in_transfile = create_unique_file('translated_genes_removed.log')
for sequence_input in input_files:
removed_in_transfile.write('Translating File: {0}\n'.format(sequence_input.current_input))
(translated_output_dir, translated_output_filename, translated_output) = check_output(sequence_input, 'Translated')
bme_translate_file = open(translated_output, 'w')
for working_sequence in sequence_reader(sequence_input.current_input).read():
if len(working_sequence) % 3 == 0:
working_sequence.seq_translate()
remove_check = False
if bme_remove_internal_stop:
working_sequence.type = 'protein'
if working_sequence.internal_stop():
removed_in_transfile.write('Gene removed - Internal stop codon found: {0}'.format(working_sequence.header))
remove_check = True
if not remove_check:
if bme_remove_terminal_stop:
if working_sequence.sequence[-1] == '*':
working_sequence.sequence = working_sequence.sequence[:-1]
if bme_label_with_filename:
working_sequence.header = '>{0}|{1}'.format(return_filename_wo_ext(sequence_input.current_input), working_sequence.header[1:])
if bme_infer_labels_ensembl:
species_header = ensembl_infer(working_sequence.header)
if species_header:
working_sequence.header = '>{0}|{1}'.format(species_header, working_sequence.header[1:])
bme_translate_file.write(str(working_sequence) + '\n')
else:
if not clean_warn:
                    clean_warn = True
                    print 'Warning: Abnormal sequence length detected. Please confirm sequences are DNA and have been cleaned'
removed_in_transfile.write('Gene removed - Abnormal sequence length: {0}'.format(working_sequence.header))
bme_translate_file.close()
removed_in_transfile.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_create_database:
### Details: Returns a database of all sequences in the specified filepath
def vespa_create_database (input_files):
def assign_database_filename(assign_filename):
if assign_filename:
return create_unique_file(assign_filename)
else:
return create_unique_file('database.fas')
print 'VESPA: Creating Database'
global bme_format_blast_database, bme_output_filename
import subprocess
sequence_type = ''
database_created = False
for sequence_input in input_files:
if not database_created:
bme_database_file = assign_database_filename(bme_output_filename)
database_created = True
for working_sequence in sequence_reader(sequence_input.current_input).read():
bme_database_file.write(str(working_sequence) + '\n')
if not sequence_type:
working_sequence.seq_type()
sequence_type = working_sequence.type
bme_database_file.close()
if bme_format_blast_database:
type_convert = {'protein':'prot','DNA':'nucl'}
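        # Shells out to NCBI's makeblastdb; roughly equivalent to running
        #   makeblastdb -dbtype prot -in database.fas
        # (the dbtype follows the detected sequence type; the filename shown is illustrative).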
try:
blast_test = subprocess.Popen(['makeblastdb', '-dbtype', type_convert[sequence_type], '-in', bme_database_file.name], stdout=subprocess.PIPE,stderr=subprocess.PIPE, stdin=subprocess.PIPE)
blast_out, blast_error = blast_test.communicate()
if not blast_error:
print 'VESPA: Formatting BLAST Database'
else:
print 'Error with makeblastdb function. Aborting format'
except:
print 'Cannot locate makeblastdb function. Aborting format'
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_individual_sequences:
### Details: Returns single files of sequences in the specified filepath
def vespa_individual_sequences (input_files):
def return_sequence_filename(sequence_header):
return_seq_filename = ''
for header_characters in sequence_header:
if return_seq_filename:
if header_characters == '|':
return_seq_filename += '_'
elif header_characters == '_':
return_seq_filename += header_characters
elif header_characters == ' ':
pass
elif not header_characters.isalnum():
break
if header_characters.isalnum():
return_seq_filename += header_characters
return return_seq_filename
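    # return_sequence_filename builds a filesystem-safe name from the FASTA header: '|' becomes
    # '_', underscores are kept, spaces are dropped, and scanning stops at the first other
    # non-alphanumeric character.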
import os
print 'VESPA: Creating Individual Sequences'
global bme_output_directory
directory_created = False
for sequence_input in input_files:
if not directory_created:
(individual_output_dir, individual_output_filename, individual_output) = check_output(sequence_input, 'Individual')
check_output_dir(individual_output_dir)
directory_created = True
for working_sequence in sequence_reader(sequence_input.current_input).read():
individual_filename = '{0}/{1}'.format(bme_output_directory,return_sequence_filename(working_sequence.header.strip()))
if '.' in sequence_input.current_input:
individual_filename += '.{0}'.format(sequence_input.current_input.split('.')[-1])
bme_individual_file = open(individual_filename, 'w')
bme_individual_file.write(str(working_sequence) + '\n')
bme_individual_file.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_split_in_groups:
### Details: Returns files that each contain multiple sequences
def vespa_split_in_groups (input_files):
import os
print 'VESPA: Creating sequence groups'
global bme_split_number_in_groups, bme_output_directory
total_sequences, total_files, sequence_counter, group_counter = 0, 0, 0, 0
for sequence_input in input_files:
with open(sequence_input.current_input) as data_for_totals:
for lines_for_totals in data_for_totals:
if '>' in lines_for_totals:
total_sequences += 1
total_files = len(str(total_sequences/bme_split_number_in_groups))
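    # total_files is the number of digits needed to zero-pad the group indices so the output files
    # sort lexically (e.g. sequence_group_007 before sequence_group_012).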
initial_loop = True
    print 'VESPA: Sequences per group: {0}'.format(bme_split_number_in_groups)
for sequence_input in input_files:
for working_sequence in sequence_reader(sequence_input.current_input).read():
if initial_loop:
(split_output_dir, split_output_filename, split_output) = check_output(sequence_input, 'Split')
check_output_dir(split_output_dir)
split_filename = '{0}/sequence_group_{1}'.format(bme_output_directory, ('0' * (total_files - len(str(group_counter)))) + str(group_counter))
if '.' in sequence_input.current_input:
split_filename += '.{0}'.format(return_extension(sequence_input.current_input))
bme_split_file = open(split_filename, 'w')
initial_loop = False
if sequence_counter == bme_split_number_in_groups:
group_counter += 1
bme_split_file.close()
split_filename = '{0}/sequence_group_{1}'.format(bme_output_directory, ('0' * (total_files - len(str(group_counter)))) + str(group_counter))
if '.' in sequence_input.current_input:
split_filename += '.{0}'.format(return_extension(sequence_input.current_input))
bme_split_file = open(split_filename, 'w')
bme_split_file.write(str(working_sequence) + '\n')
sequence_counter = 0
else:
bme_split_file.write(str(working_sequence) + '\n')
sequence_counter += 1
bme_split_file.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_gene_selection:
### Details: Returns single files of sequences in the specified filepath if present within a csv file
def vespa_gene_selection (input_files):
def return_sequence_filename(sequence_header):
return_seq_filename = ''
for header_characters in sequence_header:
if return_seq_filename:
if header_characters == '|':
return_seq_filename += '_'
elif header_characters == '_':
return_seq_filename += header_characters
elif header_characters == ' ':
pass
elif not header_characters.isalnum():
break
if header_characters.isalnum():
return_seq_filename += header_characters
return return_seq_filename
import csv
print 'VESPA: Gene selection'
global bme_selection_csv, bme_output_directory
csv_found, csv_missing, csv_list = ([], [], [row[0].strip() for row in csv.reader(open(bme_selection_csv, 'rU'))])
directory_created = False
for sequence_input in input_files:
if not directory_created:
(selected_output_dir, selected_output_filename, selected_output) = check_output(sequence_input, 'Selected')
check_output_dir(selected_output_dir)
directory_created = True
for working_sequence in sequence_reader(sequence_input.current_input).read():
for csv_entries in csv_list:
if csv_entries in working_sequence.header:
csv_found.append(csv_entries)
selection_filename = '{0}/{1}'.format(bme_output_directory, csv_entries)
if '.' in sequence_input.current_input:
selection_filename += '.{0}'.format(return_extension(sequence_input.current_input))
bme_selection_file = open(selection_filename, 'w')
bme_selection_file.write(str(working_sequence) + '\n')
bme_selection_file.close()
csv_missing = list(set(csv_list) - set(csv_found))
if csv_missing:
print '{0} genes not found, creating file: missing_genes.log'.format(len(csv_missing))
bme_missing_file = create_unique_file('missing_genes.log')
for missing_entries in csv_missing:
bme_missing_file.write(missing_entries.strip() + '\n')
bme_missing_file.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_check_SGO:
### Details: Checks for SGOs using sequence headers.
def vespa_check_SGO (input_files):
import os
from collections import defaultdict
print 'VESPA: Checking SGO status'
check_SGO_log = create_unique_file('SGO_Check.log')
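    # A file passes the SGO check only when every species tag (the field before the first '|' in
    # each header) occurs exactly once, i.e. the group contains one sequence per species.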
for sequence_input in input_files:
sgo_status = True
species_counter = defaultdict(int)
for working_sequence in sequence_reader(sequence_input.current_input).read():
current_species = working_sequence.header[1:].strip().split('|')[0]
if species_counter.has_key(current_species):
species_counter[current_species] += 1
else:
species_counter[current_species] = 1
for species_counts in species_counter.values():
if species_counts != 1:
sgo_status = False
if sgo_status:
check_SGO_log.write('{0},PASS\n'.format(sequence_input.current_input))
else:
check_SGO_log.write('{0},FAIL\n'.format(sequence_input.current_input))
check_SGO_log.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: vespa_reduce_ensembl:
### Details: Reduces the length of Ensembl ID headers
def vespa_reduce_ensembl (input_files):
print 'VESPA: Reduce Ensembl Created Headers'
reduced_conversion = create_unique_file('reduced_conversion.log')
for sequence_input in input_files:
(reduced_output_dir, reduced_output_filename, reduced_output) = check_output(sequence_input, 'Reduced')
bme_reduced_file = open(reduced_output, 'w')
for working_sequence in sequence_reader(sequence_input.current_input).read():
original_list, reduced_list = working_sequence.header.strip()[1:].split('|'), []
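            # Keep an 'ENS...' field only when the text before its first '0' ends in 'G' (a gene ID
            # such as ENSG... or ENSMUSG...); transcript/protein IDs are dropped and non-Ensembl
            # fields are kept unchanged. The old -> new header mapping is logged for traceability.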
for header_entries in original_list:
check_header = header_entries.upper()
if check_header.startswith('ENS'):
if check_header.split('0',1)[0].endswith('G'):
reduced_list.append(header_entries)
else:
reduced_list.append(header_entries)
reduced_header = '>{0}\n'.format('|'.join(reduced_list))
reduced_conversion.write('{0},{1}\n'.format(working_sequence.header.strip(),reduced_header.strip()))
working_sequence.header = reduced_header
bme_reduced_file.write(str(working_sequence) + '\n')
bme_reduced_file.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### _____ _ _ _ __ __ _ _ _
### / ____| | | | | | \ \ / / (_) | | | |
### | | __| | ___ | |__ __ _| | \ \ / /_ _ _ __ _ __ _| |__ | | ___ ___
### | | |_ | |/ _ \| '_ \ / _` | | \ \/ / _` | '__| |/ _` | '_ \| |/ _ \/ __|
### | |__| | | (_) | |_) | (_| | | \ / (_| | | | | (_| | |_) | | __/\__ \
### \_____|_|\___/|_.__/ \__,_|_| \/ \__,_|_| |_|\__,_|_.__/|_|\___||___/
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Global variables
bme_command_table = ['ensembl_clean', 'clean', 'translate', 'rev_complement', 'create_database', 'individual_sequences', 'gene_selection',
'sgo_check', 'best_reciprocal_groups', 'reciprocal_groups', 'similarity_groups', 'create_subtrees', 'map_alignments',
'infer_genetree', 'codeml_setup', 'split_sequences', 'codeml_reader', 'setup_reciprocal_input', 'prottest_setup',
'prottest_reader', 'metal_compare', 'mrbayes_setup', 'create_branch', 'mrbayes_reader', 'link_input', 'reduce_ensembl',
'h', 'help']
bme_sequence_database_location = ''
bme_assign_database_filename = ''
bme_output_directory = ''
bme_output_filename = ''
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### General sequence tool variables
bme_remove_internal_stop = True
bme_remove_terminal_stop = True
bme_label_with_filename = False
bme_infer_labels_ensembl = False
bme_split_number_in_groups = 100
bme_selection_csv = ''
bme_format_blast_database = False
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### General similarity variables
bme_similarity_bme_format_location = ''
bme_similarity_data_format = 'blast'
bme_similarity_e_value_cutoff = ''
bme_similarity_percent_identity_cutoff = ''
bme_similarity_alignment_length_cutoff = ''
blast_alignment_length_warn = False
hmmer_percent_identity_warn = False
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### General 3rd phase variables
bme_metAl_compare_files = []
bme_metAl_compare_dir = False
bme_metAl_cutoff = 0.05
bme_supported_model_list = ''
bme_mrbayes_mcmc_gen = 200000
bme_mrbayes_mcmc_chains = 4
bme_mrbayes_mcmc_temp = 0.2
bme_mrbayes_mcmc_burnin = 0.25
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### General tree variables
bme_subtree_log_file = 'vespa_subtrees.log'
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### General codeML variables
bme_species_tree = ''
bme_branch_label_table = ''
bme_in_paralogs = False
bme_alignment_path = ''
bme_main_output = ''
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### _____ _ _ _
### / ____| | | | (_)
### | | ___ _ __ ___ _ __ ___ __ _ _ __ __| | | _ _ __ ___
### | | / _ \| '_ ` _ \| '_ ` _ \ / _` | '_ \ / _` | | | | '_ \ / _ \
### | |___| (_) | | | | | | | | | | | (_| | | | | (_| | |____| | | | | __/
### \_____\___/|_| |_| |_|_| |_| |_|\__,_|_| |_|\__,_|______|_|_| |_|\___|
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### Function: command_line:
### Details: Handles user commands and assigning variables
def command_line():
global bme_sequence_database_location, bme_assign_database_filename, bme_output_directory, bme_output_filename, bme_command_table
global bme_remove_internal_stop, bme_remove_terminal_stop, bme_label_with_filename, bme_infer_labels_ensembl, bme_split_number_in_groups, bme_selection_csv, bme_format_blast_database
    global bme_similarity_e_value_cutoff, bme_similarity_percent_identity_cutoff, bme_similarity_alignment_length_cutoff, bme_similarity_data_format
global bme_metAl_compare_files, bme_metAl_compare_dir, bme_metAl_cutoff, bme_supported_model_list, bme_mrbayes_mcmc_gen, bme_mrbayes_mcmc_chains, bme_mrbayes_mcmc_temp, bme_mrbayes_mcmc_burnin
    global bme_species_tree, bme_branch_label_table, bme_in_paralogs, bme_alignment_path, bme_subtree_log_file
def command_splitter (command_array):
command_return, option_return = '', []
for command_groups in command_array:
if command_return and not command_groups.startswith('-'):
yield command_return, option_return
command_return = command_groups
option_return = []
if not command_return and not command_groups.startswith('-'):
command_return = command_groups
if command_groups.startswith('-'):
option_return.append(command_groups)
yield command_return, option_return
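    # command_splitter yields (command, [options]) pairs, so several commands can be chained in a
    # single invocation, e.g. (illustrative):
    #   vespa.py clean -input=genes.fasta translate -input=Cleaned_genes.fasta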
    def assign_input_files (input_directory, input_variable):
        import os, sys
        input_to_return = []
        if input_directory:
            for path, sub_dirs, file_list in os.walk(input_variable):
                for files in file_list:
                    if not files.startswith('.'):
                        input_to_return.append(command_line_data(os.path.join(path, files), True))
        else:
            input_to_return.append(command_line_data(input_variable, False))
        return input_to_return
    def assign_compare_files(input_variable):
        global bme_metAl_compare_dir
        import os, sys
        if check_if_input_directory(input_variable):
            for path, sub_dirs, file_list in os.walk(input_variable):
                for files in file_list:
                    if not files.startswith('.'):
                        bme_metAl_compare_dir = True
                        bme_metAl_compare_files.append(os.path.join(path, files))
        else:
            bme_metAl_compare_files.append(input_variable)
import sys, os
command_input = []
input_directory_check = False
if 'h' in sys.argv[1:]:
if len(sys.argv[1:]) > 1:
for help_request in sys.argv[2:]:
help_message(help_request)
else:
help_message('')
sys.exit()
elif 'help' in sys.argv[1:]:
if len(sys.argv[1:]) > 1:
for help_request in sys.argv[2:]:
help_message(help_request)
else:
help_message('')
sys.exit()
for current_command, options_list in command_splitter(sys.argv[1:]):
for options in options_list:
if options.startswith('-input='):
input_directory_check = check_if_input_directory(options.split('=')[1])
command_input = assign_input_files(input_directory_check, options.split('=')[1])
for options in options_list:
if options.startswith('-output='):
bme_output_directory = options.split('=')[1]
bme_output_filename = options.split('=')[1]
if input_directory_check:
for input_in_dir in command_input:
input_in_dir.current_output_filename = input_in_dir.current_input.split('/')[-1]
input_in_dir.current_output_dir = options.split('=')[1]
input_in_dir.current_output = '{0}/{1}'.format(options.split('=')[1], input_in_dir.current_input.split('/')[-1])
else:
for input_in_dir in command_input:
input_in_dir.current_output = options.split('=')[1]
elif options.startswith('-label_filename='):
if 'true' in options.split('=')[1].lower():
bme_label_with_filename = True
else:
bme_label_with_filename = False
elif options.startswith('-infer_ensembl_species='):
if 'true' in options.split('=')[1].lower():
bme_infer_labels_ensembl = True
else:
bme_infer_labels_ensembl = False
elif options.startswith('-rm_internal_stop='):
if 'true' in options.split('=')[1].lower():
bme_remove_internal_stop = True
else:
                    bme_remove_internal_stop = False
elif options.startswith('-cleave_terminal='):
if 'true' in options.split('=')[1].lower():
bme_remove_terminal_stop = True
else:
bme_remove_terminal_stop = False
elif options.startswith('-format_blast='):
if 'true' in options.split('=')[1].lower():
bme_format_blast_database = True
else:
bme_format_blast_database = False
elif options.startswith('-selection_csv='):
bme_selection_csv = options.split('=')[1]
elif options.startswith('-output_database='):
bme_assign_database_filename = options.split('=')[1]
elif options.startswith('-split_number='):
bme_split_number_in_groups = int(options.split('=')[1])
elif options.startswith('-subtree_log='):
bme_subtree_log_file = options.split('=')[1]
elif options.startswith('-species_tree='):
bme_species_tree = options.split('=')[1]
elif options.startswith('-branch_file='):
bme_branch_label_table = options.split('=')[1]
elif options.startswith('-allow_inparalogs='):
if 'true' in options.split('=')[1].lower():
bme_in_paralogs = True
else:
bme_in_paralogs = False
elif options.startswith('-format='):
bme_similarity_data_format = options.split('=')[1]
elif options.startswith('-e_value='):
bme_similarity_e_value_cutoff = float(options.split('=')[1])
elif options.startswith('-percent_identity='):
bme_similarity_percent_identity_cutoff = float(options.split('=')[1])
elif options.startswith('-alignment_length='):
bme_similarity_alignment_length_cutoff = float(options.split('=')[1])
elif options.startswith('-database='):
bme_sequence_database_location = options.split('=')[1]
elif options.startswith('-alignment_path='):
bme_alignment_path = options.split('=')[1]
elif options.startswith('-compare='):
assign_compare_files(options.split('=')[1])
elif options.startswith('-metal_cutoff='):
bme_metAl_cutoff = float(options.split('=')[1])
elif options.startswith('-model_list='):
bme_supported_model_list = options.split('=')[1]
elif options.startswith('-mcmc_gen='):
bme_mrbayes_mcmc_gen = int(options.split('=')[1])
elif options.startswith('-mcmc_chains='):
bme_mrbayes_mcmc_chains = int(options.split('=')[1])
elif options.startswith('-mcmc_temp='):
bme_mrbayes_mcmc_temp = float(options.split('=')[1])
elif options.startswith('-mcmc_burnin='):
bme_mrbayes_mcmc_burnin = float(options.split('=')[1])
if current_command.lower() in bme_command_table:
if command_input:
#1st Phase
if 'ensembl_clean' in current_command.lower():
vespa_clean_ensembl(command_input)
elif 'clean' in current_command.lower():
vespa_clean(command_input)
elif 'translate' in current_command.lower():
vespa_translate(command_input)
elif 'rev_complement' in current_command.lower():
vespa_reverse_complement(command_input)
elif 'create_database' in current_command.lower():
                vespa_create_database(command_input)
elif 'individual_sequences' in current_command.lower():
vespa_individual_sequences(command_input)
elif 'split_sequences' in current_command.lower():
vespa_split_in_groups(command_input)
elif 'gene_selection' in current_command.lower():
vespa_gene_selection(command_input)
elif 'sgo_check' in current_command.lower():
vespa_check_SGO(command_input)
elif 'reduce_ensembl' in current_command.lower():
vespa_reduce_ensembl(command_input)
#2nd Phase
elif 'setup_reciprocal_input' in current_command.lower():
vespa_setup_reciprocal_input(command_input)
elif 'best_reciprocal_groups' in current_command.lower():
vespa_best_reciprocal_similarity_groups(command_input)
elif 'reciprocal_groups' in current_command.lower():
vespa_similarity_groups(command_input,True)
elif 'similarity_groups' in current_command.lower():
vespa_similarity_groups(command_input,False)
#3rd Phase
elif 'metal_compare' in current_command.lower():
vespa_metAl_compare(command_input)
elif 'prottest_setup' in current_command.lower():
vespa_setup_prottest(command_input)
elif 'prottest_reader' in current_command.lower():
vespa_prottest_reader(command_input)
elif 'mrbayes_setup' in current_command.lower():
vespa_setup_mrbayes(command_input)
#4th Phase
elif 'mrbayes_reader' in current_command.lower():
vespa_mrbayes_reader(command_input)
elif 'create_branch' in current_command.lower():
vespa_branch_table(command_input)
elif 'create_subtrees' in current_command.lower():
vespa_subtrees(command_input)
elif 'map_alignments' in current_command.lower():
vespa_map_protein_gaps(command_input)
elif 'infer_genetree' in current_command.lower():
vespa_infer_genetree(command_input)
elif 'link_input' in current_command.lower():
vespa_link_input(command_input)
elif 'codeml_setup' in current_command.lower():
vespa_codeml_setup(command_input)
#5th Phase
elif 'codeml_reader' in current_command.lower():
vespa_codeml_reader(command_input)
else:
print 'No input specified for command: {0}. Please check command-line input'.format(current_command)
else:
print 'Specified command ({0}) not found. Please check command-line input'.format(current_command)
import sys, os
if len(sys.argv) > 1:
command_line()
else:
help_message('')
|
aewebb80/VESPA
|
vespa.py
|
Python
|
gpl-3.0
| 163,608
|
[
"BLAST"
] |
a9f5b8619f2aaa8b2350f4c7044882e7c3502fc5f1de5fb0fa6653449d559a0b
|
from math import pi
import numpy as np
from pysisyphus.calculators.Calculator import Calculator
from pysisyphus.constants import KB, AU2J
from pysisyphus.intcoords.PrimTypes import prims_from_prim_inputs
from pysisyphus.intcoords.update import correct_dihedrals
from pysisyphus.intcoords import Torsion
class LogFermi:
def __init__(self, beta, radius, T=300, origin=(0.0, 0.0, 0.0), geom=None):
"""As described in the XTB docs.
https://xtb-docs.readthedocs.io/en/latest/xcontrol.html#confining-in-a-cavity
"""
self.beta = beta
self.radius = radius
self.T = T
self.origin = np.array(origin)
# In Hartree
self.kT = KB * self.T / AU2J
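    # Energy of the confining potential (see calc below):
    #   E = kT * sum_i ln(1 + exp(beta * (|r_i - origin| - radius)))
    # so atoms well inside the cavity contribute ~0 and the wall steepness is set by beta.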
def calc(self, coords3d, gradient=False):
t0 = coords3d - self.origin[None, :]
t1 = np.linalg.norm(t0, axis=1)
t2 = np.exp(self.beta * (t1 - self.radius))
energy = (self.kT * np.log(1 + t2)).sum()
if not gradient:
return energy
grad = self.kT * ((self.beta * t2) / ((1 + t2) * t1))[:, None] * t0
return energy, grad.flatten()
def __repr__(self):
return (
f"LogFermi(beta={self.beta:.6f}, radius={self.radius:.6f}, "
f"T={self.T:.6f}, origin={self.origin})"
)
class HarmonicSphere:
def __init__(self, k, radius, origin=(0.0, 0.0, 0.0), geom=None):
self.k = k
self.radius = radius
self.origin = np.array(origin)
def calc(self, coords3d, gradient=False):
c3d_wrt_origin = coords3d - self.origin
distances = np.linalg.norm(c3d_wrt_origin, axis=1)
energies = np.where(distances > self.radius, self.k * distances ** 2, 0.0)
energy = energies.sum()
if not gradient:
return energy
"""
E(r(x)) = k*r**2
dE(r(x))/dx = dE/dr * dr/dx
dE/dr = 2*k*r
dr/dx = x/r
dE/dr * dr/dx = 2*k*x
"""
grad = np.where(distances > self.radius, 2 * self.k * c3d_wrt_origin, 0.0)
return energy, grad.flatten()
@property
def surface_area(self):
"""In Bohr**2"""
return 4 * pi * self.radius ** 2
def instant_pressure(self, coords3d):
_, gradient = self.calc(coords3d, gradient=True)
norm = np.linalg.norm(gradient)
p = norm / self.surface_area
return p
class Restraint:
def __init__(self, restraints, geom=None):
self.restraints = list()
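        # Each entry in `restraints` is (prim_input, force_constant[, reference_value]); when the
        # reference value is omitted it is taken from the supplied starting geometry.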
for prim_inp, *rest in restraints:
prims = prims_from_prim_inputs((prim_inp, ))
assert len(prims) == 1
prim = prims[0]
force_const = rest.pop(0)
try:
ref_val = rest.pop(0)
except IndexError:
assert (
geom is not None
), "Need initial coordinates when no reference value is specified!"
ref_val = prim.calculate(geom.coords3d)
self.restraints.append((prim, force_const, ref_val))
@staticmethod
def calc_prim_restraint(prim, coords3d, force_const, ref_val):
val, grad = prim.calculate(coords3d, gradient=True)
if isinstance(prim, Torsion):
# correct_dihedrals always returns a 1d array, even for scalar inputs
val = correct_dihedrals(val, ref_val)[0]
diff = val - ref_val
pot = force_const * diff ** 2
pot_grad = 2 * force_const * diff * grad
return pot, pot_grad
def calc(self, coords3d, gradient=False):
energy = 0.0
grad = np.zeros(coords3d.size)
for prim, force_const, ref_val in self.restraints:
penergy, pgrad = self.calc_prim_restraint(
prim, coords3d, force_const, ref_val
)
energy += penergy
grad += pgrad
if not gradient:
return energy
return energy, grad.flatten()
class ExternalPotential(Calculator):
available_potentials = {
"logfermi": LogFermi,
"harmonic_sphere": HarmonicSphere,
"restraint": Restraint,
}
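    # `potentials` is a list of dicts; the "type" key selects a class above and the remaining keys
    # are passed to its constructor, e.g. (illustrative values):
    #   potentials=[{"type": "logfermi", "beta": 6.0, "radius": 10.0}]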
def __init__(self, calculator=None, potentials=None, geom=None, **kwargs):
super().__init__(**kwargs)
self.calculator = calculator
self.potentials = list()
self.log("Creating external potentials")
for i, pot_kwargs in enumerate(potentials):
pot_kwargs.update({"geom": geom})
pot_key = pot_kwargs.pop("type")
pot_cls = self.available_potentials[pot_key]
pot = pot_cls(**pot_kwargs)
self.potentials.append(pot)
self.log(f"\t{i:02d}: {pot}")
def get_potential_energy(self, coords):
coords3d = coords.reshape(-1, 3)
potential_energies = [pot.calc(coords3d) for pot in self.potentials]
potential_energy = sum(potential_energies)
self.log(f"Energies from external potential: {potential_energies}")
return potential_energy
def get_energy(self, atoms, coords):
potential_energy = self.get_potential_energy(coords)
if self.calculator is not None:
results = self.calculator.get_energy(atoms, coords)
else:
results = {"energy": 0.0}
results["energy"] += potential_energy
return results
def get_potential_forces(self, coords):
coords3d = coords.reshape(-1, 3)
energies_gradients = [
pot.calc(coords3d, gradient=True) for pot in self.potentials
]
energies, gradients = zip(*energies_gradients)
self.log(f"Energies from external potential: {energies}")
energy = sum(energies)
forces = -np.sum(gradients, axis=0)
self.log(f"Forces from external potential: {forces}")
return energy, forces
def get_forces(self, atoms, coords):
potential_energy, potential_forces = self.get_potential_forces(coords)
if self.calculator is not None:
results = self.calculator.get_forces(atoms, coords)
else:
results = {"energy": 0.0, "forces": np.zeros_like(coords)}
results["energy"] += potential_energy
results["forces"] += potential_forces
return results
def get_hessian(self, atoms, coords):
raise Exception("Hessian is not implemented for ExternalPotential!")
|
eljost/pysisyphus
|
pysisyphus/calculators/ExternalPotential.py
|
Python
|
gpl-3.0
| 6,354
|
[
"xTB"
] |
321f4ec291aeb219d90793252d1ae1a08f84e7ac7863682827673dff6a066772
|
#! /usr/bin/env python
## Copyright (c) 1999 - 2003 L. C. Rees. All rights reserved.
## See COPYRIGHT file for license terms.
from __future__ import generators
__name__ = 'spider'
__version__ = '0.5'
__author__ = 'L.C. Rees (xanimal@users.sf.net)'
__all__ = ['ftpurls', 'ftppaths', 'weburls', 'ftpmirror', 'ftpspider',
'webpaths', 'webreport', 'webmirror', 'webspider', 'urlreport',
'badurlreport', 'badhtmreport', 'redireport', 'outreport', 'othereport']
'''Multithreaded crawling, reporting, and mirroring for Web and FTP.'''
class Spider:
'''HTTP and FTP crawling, reporting, and checking'''
import os as _os
import urllib as _ulib
import urlparse as _uparse
from os import path as _path
from ftplib import FTP as _ftp
from time import strftime as _formtime
from time import localtime as _localtime
from ftplib import error_perm as _ftperr
from sgmllib import SGMLParseError as _sperror
from robotparser import RobotFileParser as _rparser
# Use threads if available
try: from threading import Thread as _thread
except ImportError: pass
_bdsig, _bfsig, _session, _newparser = None, None, None, None
# HTML tags with URLs
_urltags = {'a':1, 'img':1, 'link':1, 'script':1, 'iframe':1, 'object':1,
'embed':1, 'area':1, 'frame':1, 'applet':1, 'input':1, 'base':1,
'div':1, 'layer':1, 'ilayer':1, 'bgsound':1}
# Supported protocols
_supported = {'HTTP':1, 'http':1, 'HTTPS':1, 'https':1, 'FTP':1, 'ftp':1}
# HTML attributes with URLs
_urlattrs = {'href':1, 'src':1, 'data':1}
def __init__(self, base=None, width=None, depth=None):
'''Initializes a Spider instance and its base attributes
Arguments:
base -- URL to crawl (default: None)
width -- maximum resources to crawl (default: None)
depth -- how deep in a hierarchy to crawl (default: None)'''
if base: self.base = base
else: self.base = None
if width: self.width = width
else: self.width = None
if depth: self.depth = depth
else: self.depth = None
def _ftpopen(self, base, name='anonymous', password=None, attempts=3):
'''Returns FTP client session
Arguments:
base -- FTP server URL
name -- login name (default: 'anonymous')
password -- login password (default: None)
attempts -- number of login attempts to try (default: 3)'''
def ftpprompt(tries=0):
'''Prompts for FTP username and password
Arguments:
tries -- number of login attempts'''
            tries += 1
try:
self._name = raw_input('Enter login name: ')
self._password = raw_input('Enter password: ')
session = ftp(base, self._name, self._password)
return session
# If login attempt fails, retry login
except ftperr:
if attempts >= tries:
session = ftpprompt(tries)
return session
# Too many login attempts? End program
elif attempts <= tries:
                    raise IOError, 'Permission denied.'
# Assignments
self._name, self._password, ftperr = name, password, self._ftperr
su, ftp = self._uparse.urlsplit(base), self._ftp
# Set URL, path, and strip 'ftp://' off
base, path = su[1], '/'.join([su[2], ''])
try: session = ftp(base, name, password)
# Prompt for username, password if initial arguments are incorrect
except ftperr: session = ftpprompt()
        # Change to remote path if it exists
if path: session.cwd(path)
return session
def ftpmirror(self, l, t=None, b=None, w=200, d=6, n='anonymous', p=None):
'''Mirrors an FTP site on a local filesystem
Arguments:
        l -- local filesystem path
        t -- number of download threads (default: None)
        b -- FTP server URL (default: None)
w -- maximum amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 6)
n -- login username (default: 'anonymous')
p -- login password (default: None)'''
if b: self.ftpspider(b, w, d, n, p)
return self._mirror((self.paths, self.urls), l, t)
def ftppaths(self, b=None, w=200, d=6, n='anonymous', p=None):
'''Returns a list of FTP paths.
Arguments:
b -- FTP server URL (default: None)
w -- maximum amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 6)
n -- login username (default: 'anonymous')
p -- login password (default: None)'''
def sortftp(rdir):
'''Returns a list of entries marked as files or directories
Arguments:
rdir -- remote directory list'''
rlist = []
rappend = rlist.append
for rl in rdir:
# Split remote file based on whitespace
ri = rl.split()[-1]
# Add tuple of remote item type, permissions & name to rlist
if ri not in ('.', '..'): rappend((rl[0], rl[7], ri))
return rlist
def visitftp():
'''Extracts contents of an FTP directory'''
wd = pwd()
if wd[-1] != '/': wd = '/'.join([wd, ''])
# Add present working directory to visited directories
dirs[wd], rlist = None, []
# Get list of current directory's contents
retr('LIST -a', rlist.append)
for url in sortftp(rlist):
# Test if remote item is a file (indicated by '-')
if url[0] == '-':
# Resolve path of file
purl = ''.join([wd, url[2]])
                    # Ensure file list doesn't exceed max number of resources
if len(files) >= width: return None
# Add files to file dictionary
elif purl not in files: files[purl] = None
# Test if it's a directory ('d') and allows scanning ('-')
elif url[0] == 'd':
if url[1] != '-':
# Resolve path of directory
purl = ''.join([wd, url[2], '/'])
# Ensure no recursion beyond depth allowed
if len(purl.split('/')) >= depth: dirs[purl] = None
# Visit directory if it hasn't been visited yet
elif purl not in dirs:
# Change to new directory
cwd(purl)
# Run 'visitftp' on new directory
visitftp()
# Use classwide attributes if set
if b: self.base = b
else: b = self.base
# Use classwide width if different from method default
if self.width and w == 200: width = self.width
else: width = w
# Use classwide depth if different from method default
if self.depth and d == 6: depth = self.depth + 1
else: depth = d + 1
# File and directory dicts
files, dirs = {}, {}
# Use existing FTP client session if present
if self._session: ftp = self._session
# Create new FTP client session if necessary
else:
ftp = self._ftpopen(b, n, p)
self._session = ftp
# Avoid outside namespace lookups
cwd, pwd, retr = ftp.cwd, ftp.pwd, ftp.retrlines
# Walk FTP site
visitftp()
# Make path list out of files' keys and return it
self.paths = files.keys()
self.paths.sort()
return self.paths
def ftpspider(self, b=None, w=200, d=6, n='anonymous', p=None):
'''Returns lists of URLs and paths plus a live FTP client session
Arguments:
b -- FTP server URL (default: None)
w -- maximum amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 6)
n -- login username (default: 'anonymous')
p -- login password (default: None)'''
        if b: self.ftppaths(b, w, d, n, p)
        return self.paths, self.ftpurls(), self._session
def ftpurls(self, b=None, w=200, d=6, n='anonymous', p=None):
'''Returns a list of FTP URLs
Arguments:
b -- FTP server URL (default: None)
w -- maximum amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 6)
n -- login username (default: 'anonymous')
p -- login password (default: None)'''
if b:
            self.ftppaths(b, w, d, n, p)
# Get rid of trailing '/' in base if present before joining
if b[-1] == '/': base = b[:-1]
else:
base = self.base
# Get rid of trailing '/' in base if present before joining
if base[-1] == '/': base = self.base[:-1]
paths = self.paths
# Add FTP URL
self.urls = [''.join([base, i]) for i in paths]
return self.urls
def _parserpick(self, old=None):
'''Returns a class using the sgmllib parser or the sgmlop parser
Arguments:
old -- use classic sgmllib SGMLParser'''
# Assignments
urltags, urlattrs = self._urltags, self._urlattrs
# Lists for bad file and bad directory signatures
self._bfsig, self._bdsig = [], []
bfsig, bdsig = self._bfsig, self._bdsig
# Use faster SGMLParser if available
try:
from sgmlop import SGMLParser as newparser
self._newparser = newparser
# If unavailable, use classic SGML parser
except ImportError:
from sgmllib import SGMLParser as oldparser
old = 1
# Classes using classic sgmllib SGML Parser
if old:
from sgmllib import SGMLParser as oldparser
# Remove sgmlop parser if present
self._newparser = None
# UrlExtract class using classic parser
class UrlExtract(oldparser):
'''Extracts URLs from a SGMLish document'''
def reset(self):
'''Resets SGML parser and clears lists'''
oldparser.reset(self)
self.urls, self.text, self.badurl = [], [], None
def handle_data(self, data):
'''Handles non-markup data'''
# Get first 5 lines of non-markup data
if len(self.text) <= 5: self.text.append(data)
# Compare signature of known bad URL to a new web page
if self.text == bfsig: self.badurl = 1
elif self.text == bdsig: self.badurl = 1
def finish_starttag(self, tag, attrs):
'''Extracts URL bearing tags'''
if tag in urltags:
                        # Get key, value in attributes if they match
url = [v for k, v in attrs if k in urlattrs]
if url: self.urls.extend(url)
# BadUrl class using classic parser
class BadUrl(oldparser):
'''Collects results of intentionally incorrect URLs'''
def reset(self):
'''Resets SGML parser and clears lists'''
oldparser.reset(self)
self.text = []
def handle_data(self, data):
'''Collects lines to profile bad URLs'''
# Adds first 5 lines of non-markup data to text
if len(self.text) <= 5: self.text.append(data)
# If no old flag, use SGMLParser from sgmlop and related classes
else:
# UrlExtract class using sgmlop parser
class UrlExtract:
'''Extracts URLs from a SGMLish document'''
def __init__(self):
'''Resets SGML parser and clears lists'''
self.urls, self.text, self.badurl = [], [], None
def handle_data(self, data):
'''Handles non-markup data'''
# Get first 5 lines of non-markup data
if len(self.text) <= 5: self.text.append(data)
# Compare signature of known bad URL to a new web page
if self.text == bfsig: self.badurl = 1
elif self.text == bdsig: self.badurl = 1
def finish_starttag(self, tag, attrs):
'''Extracts URL bearing tags'''
if tag in urltags:
                        # Get key, value in attributes if they match
url = [v for k, v in attrs if k in urlattrs]
if url: self.urls.extend(url)
# BadUrl class using sgmlop parser
class BadUrl:
'''Collects results of intentionally incorrect URLs'''
def __init__(self):
'''Resets SGML parser and clears lists'''
self.text = []
def handle_data(self, data):
'''Collects lines to profile not found responses'''
# Adds first 5 lines of non-markup data to list 'text'
if len(self.text) <= 5: self.text.append(data)
# Make resulting classes available class wide
self._UrlExtract, self._BadUrl = UrlExtract, BadUrl
def _webtest(self):
'''Generates signatures for identifying bad URLs'''
def badurl(url):
'''Returns first 5 lines of a bad URL
Arguments:
url -- Bad URL to open and parse'''
# Use different classes if faster SGML Parser is available
if self._newparser:
# sgmlop parser must have a handler passed to it
parser, urlget = self._newparser(), BadUrl()
# Pass handler (sgmlop cannot be subclassed)
parser.register(urlget)
parser.feed(urlopen(url).read())
parser.close()
# Use classic parser
else:
urlget = BadUrl()
urlget.feed(urlopen(url).read())
urlget.close()
            # Return signature of bad URL
return urlget.text
# Make globals local
base, urljoin = self.base, self._uparse.urljoin
urlopen, BadUrl = self._ulib.urlopen, self._BadUrl
# Generate random string of jibber
from string import letters, digits
from random import choice, randint
jibber = ''.join([letters, digits])
ru = ''.join([choice(jibber) for x in range(randint(1, 30))])
# Builds signature of a bad URL for a file
self._bfsig.extend(badurl(urljoin(base, '%s.html' % ru)))
# Builds signature of a bad URL for a directory
self._bdsig.extend(badurl(urljoin(base,'%s/' % ru)))
def _webparser(self, html):
'''Parses HTML and returns bad URL indicator and extracted URLs
Arguments:
html -- HTML data'''
# Use different classes if faster SGML Parser is available
if self._newparser:
# Make instances of SGML parser and URL extracting handler
parser, urlget = self._newparser(), self._UrlExtract()
# Pass handler to parser
parser.register(urlget)
# Feed data to parser
parser.feed(html)
parser.close()
# Return bad URL indicator and extracted URLs
else:
urlget = self._UrlExtract()
urlget.feed(html)
urlget.close()
# Return badurl marker and list of child URLS
return urlget.badurl, urlget.urls
def _webopen(self, base):
'''Verifies URL and returns actual URL and extracted child URLs
Arguments:
base -- tuple containing a URL and its referring URL'''
# Assignments
good, cbase = self._good, base[0]
try:
# If webspiders can access URL, open it
if self._robot.can_fetch('*', cbase):
url = self._ulib.urlopen(cbase)
# Otherwise, mark as visited and abort
else:
self._visited[cbase] = 1
return False
# If HTTP error, log bad URL and abort
except IOError:
self._visited[cbase] = 1
self.badurls.append((base[1], cbase))
return False
# Get real URL
newbase = url.geturl()
# Change URL if different from old URL
if newbase != cbase: cbase, base = newbase, (newbase, base[1])
        # URLs with mimetype 'text/html' are scanned for URLs
if url.headers.type == 'text/html':
# Feed parser
contents = url.read()
try: badurl, urls = self._webparser(contents)
# Log URL if SGML parser can't parse it
except self._sperror:
self._visited[cbase], self.badhtm[cbase] = 1, 1
return False
url.close()
# Return URL and extracted urls if it's good
if not badurl: return cbase, urls
# If the URL is bad (after BadUrl), stop processing and log URL
else:
self._visited[cbase] = 1
self.badurls.append((base[1], cbase))
return False
# Return URL of non-HTML resources and empty list
else:
url.close()
return cbase, []
def _genverify(self, urls, base):
        '''Verifies a list of raw URLs relative to a base URL
Arguments:
urls -- list of raw URLs
base -- referring URL'''
# Assignments
cache, visit, urlverify = self._cache, self._visited, self._urlverify
# Strip file off base URL for joining
newbase = base.replace(base.split('/')[-1], '')
for url in urls:
# Get resolved url and raw child URLs
url, rawurls = urlverify(url, base, newbase)
# Handle any child URLs
if rawurls:
newurls = {}
# Eliminate duplicate URLs
for rawurl in rawurls:
# Eliminate known visited URLs
if rawurl not in visit: newurls[rawurl] = 1
# Put new URLs in cache if present
if newurls: cache[url] = newurls
# Yield new URL
if url: yield url
def _multiverify(self, url, base):
'''Verifies a full URL relative to a base URL
Arguments:
        url -- a raw URL
base -- referring URL'''
# Assignments
cache, visited = self._cache, self._visited
# Strip file off base URL for joining
newbase = base.replace(base.split('/')[-1], '')
# Get resolved url and raw child URLs
url, rawurls = self._urlverify(url, base, newbase)
# Handle any child URLs
if rawurls:
# Eliminate known visited URLs and duplicates
for rawurl in rawurls:
# Put new URLs in cache if present
if rawurl not in visited: cache[rawurl] = url
# Put URL in list of good URLs
if url: self._good[url] = 1
def _urlverify(self, url, base, newbase):
'''Returns a full URL relative to a base URL
Arguments:
        url -- a raw URL
base -- referring URL
newbase -- temporary version of referring URL for joining'''
# Assignments
visited, webopen, other = self._visited, self._webopen, self.other
sb, depth, urljoin = self._sb[2], self.depth, self._uparse.urljoin
urlsplit, urldefrag = self._uparse.urlsplit, self._uparse.urldefrag
outside, redirs, supported = self.outside, self.redirs, self._supported
if url not in visited:
# Remove whitespace from URL
if url.find(' ') != -1:
visited[url], url = 1, url.replace(' ', '')
if url in visited: return 0, 0
# Remove fragments i.e. 'http:foo/bar#frag'
if url.find('#') != -1:
visited[url], url = 1, urldefrag(url)[0]
if url in visited: return 0, 0
            # Process full URLs i.e. 'http://foo/bar'
if url.find(':') != -1:
urlseg = urlsplit(url)
# Block non-FTP, HTTP URLs
if urlseg[0] not in supported:
# Log as non-FTP/HTTP URL
other[url], visited[url] = 1, 1
return 0, 0
# If URL is not in root domain, block it
if urlseg[1] not in sb:
visited[url], outside[url] = 1, 1
return 0, 0
# Block duplicate root URLs
elif not urlseg[2] and urlseg[1] == sb:
visited[url] = 1
return 0, 0
# Handle relative URLs i.e. ../foo/bar
elif url.find(':') == -1:
# Join root domain and relative URL
visited[url], url = 1, urljoin(newbase, url)
if url in visited: return 0, 0
# Test URL by attempting to open it
rurl = webopen((url, base))
if rurl and rurl[0] not in visited:
# Get URL
turl, rawurls = rurl
visited[url], visited[turl] = 1, 1
# If URL resolved to a different URL, process it
if turl != url:
urlseg = urlsplit(turl)
# If URL is not in root domain, block it
if urlseg[1] not in sb:
# Log as a redirected internal URL
redirs[(url, turl)] = 1
return 0, 0
# Block duplicate root URLs
elif not urlseg[2] and urlseg[1] == sb: return 0, 0
# If URL exceeds depth, don't process
if len(turl.split('/')) >= depth: return 0, 0
# Otherwise return URL
else:
if rawurls: return turl, rawurls
else: return turl, []
else: return 0,0
else: return 0, 0
def _onewalk(self):
'''Yields good URLs from under a base URL'''
# Assignments
cache, genverify = self._cache, self._genverify
# End processing if cache is empty
while cache:
# Fetch item from cache
base, urls = cache.popitem()
# If item has child URLs, process them and yield good URLs
if urls:
for url in genverify(urls, base): yield url
def _multiwalk(self, threads):
'''Extracts good URLs from under a base URL
Arguments:
threads -- number of threads to run'''
def urlthread(url, base):
'''Spawns a thread containing a multiverify function
Arguments:
url -- URL to verify
base -- referring URL'''
# Create instance of Thread
dthread = Thread(target=multiverify, args=(url, base))
# Put in pool
pool.append(dthread)
# Assignments
pool, cache, multiverify = [], self._cache, self._multiverify
Thread, width, good = self._thread, self.width, self._good
# End processing if cache is empty
while cache:
# Process URLs as long as width not exceeded
if len(good) <= width:
# Fetch item from cache
url, base = cache.popitem()
# Make thread
if url: urlthread(url, base)
# Run threads once pool size is reached
if len(pool) == threads or threads >= len(cache):
# Start threads
for thread in pool: thread.start()
# Empty thread pool as threads complete
while pool:
for thread in pool:
if not thread.isAlive(): pool.remove(thread)
# End if width reached
elif len(good) >= width: break
def weburls(self, base=None, width=200, depth=5, thread=None):
        '''Returns a list of web URLs.
Arguments:
base -- base web URL (default: None)
width -- amount of resources to crawl (default: 200)
depth -- depth in hierarchy to crawl (default: 5)
thread -- number of threads to run (default: None)'''
# Assignments
self._visited, self._good, self._cache, self.badurls = {}, {}, {}, []
self.redirs, self.outside, self.badhtm, self.other = {}, {}, {}, {}
onewalk, good, self._robot = self._onewalk, self._good, self._rparser()
uparse, robot, multiwalk = self._uparse, self._robot, self._multiwalk
cache = self._cache
# Assign width
if self.width and width == 200: width = self.width
else: self.width = width
# sgmlop crashes Python after too many iterations
if width > 5000: self._parserpick(1)
else: self._parserpick()
# Use global base if present
if not base: base = self.base
# Verify URL and get child URLs
newbase, rawurls = self._webopen((base, ''))
if newbase:
# Change base URL if different
if newbase != base: base = newbase
# Ensure there's a trailing '/' in base URL
if base[-1] != '/':
url = list(uparse.urlsplit(base))
url[1] = ''.join([url[1], '/'])
base = uparse.urlunsplit(url)
# Eliminate duplicates and put raw URLs in cache
newurls = {}
for rawurl in rawurls: newurls[rawurl] = 1
if newurls:
# Cache URLs individually if threads are desired
if thread:
for newurl in newurls: cache[newurl] = base
# Cache in group if no threads
else: cache[base] = newurls
# Make base URL, get split, and put in verified URL list
self.base, self._sb = base, base.split('/')
self._visited[base], good[base] = 1, 1
# If URL is bad, abort and raise error
else: raise IOError, "URL is invalid"
        # Adjust depth to length of base URL
        if self.depth and depth == 5: self.depth += len(self._sb)
else: self.depth = depth + len(self._sb)
# Get robot limits
robot.set_url(''.join([base, 'robots.txt']))
robot.read()
# Get signature of bad URL
self._webtest()
# Get good URLs as long as total width isn't exceeded
try:
# Multiwalk if threaded
if thread: self._multiwalk(thread)
# Otherwise, use single thread
else:
for item in onewalk():
# Don't exceed maximum width
if len(good) <= width: good[item] = 1
elif len(good) >= width: break
# If user interrupts crawl, return what's done
except KeyboardInterrupt: pass
# Get URLs, sort them, and return list
self.urls = good.keys()
self.urls.sort()
return self.urls
def webpaths(self, b=None, w=200, d=5, t=None):
'''Returns a list of web paths.
Arguments:
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
def pathize():
'''Strips base URL from full URLs to produce paths'''
for url in urls:
# Remove base URL from path list
url = url.replace(self.base, '')
# Add default name 'index.html' to root URLs and directories
if not url: url = 'index.html'
elif url[-1] == '/': url = ''.join([url, 'index.html'])
# Verify removal of base URL and remove it if found
if url.find(':') != -1: url = urlsplit(url)[2:][0]
yield url
# Assignments
urlsplit = self._uparse.urlsplit
# Run weburls if base passed as an argument
if b: self.weburls(b, w, d, t)
# Strip off trailing resource or query from base URL
if self.base[-1] != '/': self.base = '/'.join(self._sb[:-1])
urls = self.urls
# Return path list after stripping base URL
self.paths = list(pathize())
return self.paths
def webmirror(self, root=None, t=None, base=None, width=200, depth=5):
'''Mirrors a website on a local filesystem
Arguments:
root -- local filesystem path (default: None)
t -- number of threads (default: None)
base -- base web URL (default: None)
width -- amount of resources to crawl (default: 200)
depth -- depth in hierarchy to crawl (default: 5)'''
if base: self.webspider(base, width, depth, t)
# debug
# print self.paths
# print "urls"
# print self.urls
return self._mirror((self.paths, self.urls), root, t)
def webspider(self, b=None, w=200, d=5, t=None):
'''Returns two lists of child URLs and paths
Arguments:
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
return self.webpaths(), self.urls
def badurlreport(self, f=None, b=None, w=200, d=5, t=None):
'''Pretties up a list of bad URLs
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
# Format report if information is available
if self.badurls:
# Number of bad URLs
amount = str(len(self.badurls))
header = '%s broken URLs under %s on %s:\n'
# Print referring URL pointing to bad URL
body = '\n'.join([' -> '.join([i[0], i[1]]) for i in self.badurls])
report = self._formatreport(amount, header, body, f)
# Return if just getting string
if report: return report
def badhtmreport(self, f=None, b=None, w=200, d=5, t=None):
'''Pretties up a list of unparsed HTML URLs
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
# Format report if information is available
if self.badhtm:
amount = str(len(self.badhtm))
header = '%s unparsable HTML URLs under %s on %s:\n'
body = '\n'.join(self.badhtm)
report = self._formatreport(amount, header, body, f)
# Return if just getting string
if report: return report
def redireport(self, f=None, b=None, w=200, d=5, t=None):
'''Pretties up a list of URLs redirected to an external URL
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
# Format report if information is available
if self.redirs:
amount = str(len(self.redirs))
header = '%s redirects to external URLs under %s on %s:\n'
# Print referring URL pointing to new URL
body = '\n'.join([' -> '.join([i[0], i[1]]) for i in self.redirs])
report = self._formatreport(amount, header, body, f)
# Return if just getting string
if report: return report
def outreport(self, f=None, b=None, w=200, d=5, t=None):
'''Pretties up a list of outside URLs referenced under the base URL
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
# Format report if information is available
if self.outside:
amount = str(len(self.outside))
header = '%s links to external URLs under %s on %s:\n'
body = '\n'.join(self.outside)
report = self._formatreport(amount, header, body, f)
# Return if just getting string
if report: return report
def othereport(self, f=None, b=None, w=200, d=5, t=None):
'''Pretties up a list of non-HTTP/FTP URLs
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
# Format report if information is available
if self.other:
amount = str(len(self.other))
header = '%s non-FTP/non-HTTP URLs under %s on %s:\n'
body = '\n'.join(self.other)
report = self._formatreport(amount, header, body, f)
# Return if just getting string
if report: return report
def urlreport(self, f=None, b=None, w=200, d=5, t=None):
'''Pretties up a list of all URLs under a URL
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)'''
if b: self.weburls(b, w, d, t)
# Format report if information is available
if self.urls:
amount = str(len(self.urls))
header = '%s verified URLs under %s on %s:\n'
body = '\n'.join(self.urls)
report = self._formatreport(amount, header, body, f)
# Return if just getting string
if report: return report
def webreport(self, f=None, b=None, w=200, d=5, t=None, *vargs):
'''Pretties up a list of logged information under a URL
Arguments:
f -- output file for report (default: None)
b -- base web URL (default: None)
w -- amount of resources to crawl (default: 200)
d -- depth in hierarchy to crawl (default: 5)
t -- number of threads (default: None)
vargs -- report sections to include or exclude
To override defaults:
To include a section add 'badhtm', 'redirs', 'outside', or 'other'
To exclude a section add 'badurls' or "urls"'''
if b: self.weburls(b, w, d, t)
# Defaults for report
badurls, badhtm, redirs, urls, outside, other = 1, 0, 0, 1, 0, 0
# Create compilation list
compile = []
# Override default report settings if argument is passed to vargs
for arg in vargs:
if arg == 'badurls': badurls = 0
elif arg == 'badhtm': badhtm = 1
elif arg == 'redirs': redirs = 1
elif arg == 'urls': urls = 0
elif arg == 'outside': outside = 1
elif arg == 'other': other = 1
# Compile report
if badurls:
badurls = self.badurlreport()
if badurls: compile.append(badurls)
if urls:
urls = self.urlreport()
if urls: compile.append(urls)
if outside:
outside = self.outreport()
if outside: compile.append(outside)
if redirs:
redirs = self.redireport()
if redirs: compile.append(redirs)
if badhtm:
badhtm = self.badhtmreport()
if badhtm: compile.append(badhtm)
if other:
other = self.othereport()
if other: compile.append(other)
# Make report
report = '\n\n'.join(compile)
# Write to file if argument present
        if f: open(f, 'w').write(report)
# Or return string
else: return report
def _formatreport(self, amount, header, body, file=None):
'''Generic prettifier with date/time stamper
        Arguments:
        amount -- number of items being reported
        header -- title of report
        body -- body of report
        file -- output file for report (default: None)'''
# Get current time
localtime, strftime = self._localtime, self._formtime
curtime = strftime('%A, %B %d, %Y at %I:%M %p', localtime())
# Make section header
header = header % (amount, self.base, curtime)
# Add header to body
report = '\n'.join([header, body])
# Write to file if argument present
if file: open(file, 'w').write(report)
# Or return string
else: return report
def _mirror(self, lists, root=None, threads=None):
'''Mirrors a site on a local filesystem based on lists passed to it
        Arguments:
lists -- lists of URLs and paths
root -- local filesystem path (default: None)
threads -- number of threads (default: None)'''
def download(url, np, op):
'''Downloads files that need to be mirrored.'''
# If ftp...
if url[:3] == 'ftp':
# Open local file
local = open(np, 'wb')
# Download using FTP session
ftp = ftpopen(base, name, password)
ftp.retrbinary('RETR %s' % op, local.write)
ftp.close()
# Close local file
local.close()
# Use normal urlretrieve if no FTP required
else: ulib.urlretrieve(url, np)
def dlthread(url, np, op):
'''Spawns a thread containing the download function'''
# Create thread
dthread = Thread(target=download, args=(url, np, op))
# Add to thread pool
pool.append(dthread)
# Extract path and URL lists
paths, urls = lists
# Avoid outside namespace lookups
ulib, makedirs, sep = self._ulib, self._os.makedirs, self._os.sep
normcase, split = self._path.normcase, self._path.split
exists, isdir = self._path.exists, self._path.isdir
ftpopen = self._ftpopen
# Create local names for thread class and thread pool
if threads: Thread, pool = self._thread, []
# Localize name and password if exists
try: base, name, password = self.base, self._name, self._password
except AttributeError: pass
# Change to directory if given...
if root:
if exists(root):
if isdir(root): self._os.chdir(root)
# Create root if it doesn't exist
else:
makedirs(root)
self._os.chdir(root)
# Otherwise use current directory
else: root = self._os.getcwd()
# Iterate over paths and download files
for oldpath in paths:
# Sync with the URL for oldpath
url = urls[paths.index(oldpath)]
# Create name of local copy
newpath = normcase(oldpath).lstrip(sep)
# Get directory name
dirname = split(newpath)[0]
# If the directory exists, download the file directly
if exists(dirname):
if isdir(dirname):
if threads: dlthread(url, newpath, oldpath)
else: download(url, newpath, oldpath)
# Don't create local directory if path in root of remote URL
elif not dirname:
if threads: dlthread(url, newpath, oldpath)
else: download(url, newpath, oldpath)
            # Make local directory if it doesn't exist, then download file
else:
makedirs(dirname)
if threads: dlthread(url, newpath, oldpath)
else: download(url, newpath, oldpath)
# Run threads if they've hit the max number of threads allowed
if threads:
# Run if max threads or final thread reached
if len(pool) == threads or paths[-1] == oldpath:
# Start all threads
for thread in pool: thread.start()
# Clear the thread pool as they finish
while pool:
for thread in pool:
if not thread.isAlive(): pool.remove(thread)
# Instance of Spider enables exporting Spider's methods as standalone functions
_inst = Spider()
ftpurls = _inst.ftpurls
weburls = _inst.weburls
ftppaths = _inst.ftppaths
webpaths = _inst.webpaths
ftpmirror = _inst.ftpmirror
ftpspider = _inst.ftpspider
webmirror = _inst.webmirror
webspider = _inst.webspider
webreport = _inst.webreport
urlreport = _inst.urlreport
outreport = _inst.outreport
redireport = _inst.redireport
othereport = _inst.othereport
badurlreport = _inst.badurlreport
badhtmreport = _inst.badhtmreport
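# Minimal usage sketches for the standalone helpers exported above (added for
# illustration). The hosts, crawl limits, and output file name below are
# assumptions, not values used anywhere else in this module.
def _web_demo():
    '''Crawl a small site, print a count, and write a combined report'''
    site = 'http://www.example.com/'
    found = weburls(site, 25, 3)
    print '%d URLs found under %s' % (len(found), site)
    # Reuse the crawl just done; include redirects, drop the full URL list
    webreport('crawl_report.txt', None, 25, 3, None, 'redirs', 'urls')

def _ftp_demo():
    '''Walk an anonymous FTP site and return its paths and URLs'''
    paths = ftppaths('ftp://ftp.example.com/', 50, 3)
    return paths, ftpurls()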
|
heitara/webfetcher
|
spider.py
|
Python
|
apache-2.0
| 42,576
|
[
"VisIt"
] |
a924a47d9f6a5f1f51c83f41354761a5e049b939dd71cf9a16c38cee839f9aee
|
{
"name" : "Unified Chart of Accounts for US Non-Profits",
"version" : "2.6.0",
"author" : "Free Geek Twin Cities",
    "email" : "brian@freegeektwincities.org",
    "website" : "http://tryton-ucoa.googlecode.com/",
"category" : "Accounting",
"description": "Defines an account template for US non-profit/charitable organizations.",
"depends" : [
"account",
],
"xml" : [
"account_ucoa.xml",
],
}
|
FreeGeekTwinCities/tryton-ucoa
|
__tryton__.py
|
Python
|
gpl-3.0
| 447
|
[
"Brian"
] |
5825421bdc413fdd62f8b2a9d21c36c40e234cfe601805f37be6e35d7b6ba1fd
|
""" This file contains different utility functions that are not connected
in anyway to the networks presented in the tutorials, but rather help in
processing the outputs into a more understandable way.
For example ``tile_raster_images`` helps in generating a easy to grasp
image from a set of samples or weights.
"""
import numpy as np
import PIL as PIL
# Stuff for visualizing diagnostics
from sklearn.neighbors import KernelDensity
import matplotlib as mpl
mpl.use('Agg')
class batch(object):
def __init__(self,batch_size):
self.batch_size = batch_size
def __call__(self,f):
def wrapper(t,X):
X = np.array(X)
p = 0
rem = 0
results = []
while p < len(X):
Z = X[p:p+self.batch_size]
if Z.shape[0] != self.batch_size:
zeros = np.zeros((self.batch_size-len(Z),X.shape[1]))
rem = len(Z)
Z = np.array(np.vstack((Z,zeros)),dtype=X.dtype)
temp_results = f(t,Z)
if rem != 0:
temp_results = temp_results[:rem]
results.extend(temp_results)
p += self.batch_size
return np.array(results,dtype='float32')
return wrapper
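# A small usage sketch for the batch decorator above (illustrative only; the
# decorated function and sizes are made up). The wrapper pads the final chunk
# with zeros so the wrapped function always sees `batch_size` rows, then trims
# the padded outputs before returning.
def _batch_demo():
    @batch(batch_size=8)
    def identity(t, X):
        return X
    data = np.random.rand(20, 3).astype('float32')
    out = identity(None, data)
    assert out.shape == data.shape
    return out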
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape=None, tile_shape=None, tile_spacing=(0, 0),
scale=True,
output_pixel_vals=True,
colorImg=False):
"""
    Transform an array with one flattened image per row into an array in
    which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
"""
X = X * 1.0 # converts ints to floats
if colorImg:
channelSize = X.shape[1]/3
X = (X[:,0:channelSize], X[:,channelSize:2*channelSize], X[:,2*channelSize:3*channelSize], None)
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0] + tile_spacing[0]) * tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1] + tile_spacing[1]) * tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
out_array[:, :, i] = np.zeros(out_shape,
dtype='uint8' if output_pixel_vals else out_array.dtype
) + channel_defaults[i]
if i < 3:
print('WHY AM I HERE (utils.py line 101)?')
else:
                # use a recursive call to compute the channel and store it
# in the output
xi = X[i]
if scale:
# shift and scale this channel to be in [0...1]
xi = (X[i] - X[i].min()) / (X[i].max() - X[i].min())
out_array[:, :, i] = tile_raster_images(xi, img_shape, tile_shape, tile_spacing, False, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
out_array = np.zeros(out_shape, dtype='uint8' if output_pixel_vals else X.dtype)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
if scale:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
tmp = X[tile_row * tile_shape[1] + tile_col].reshape(img_shape)
this_img = scale_to_unit_interval(tmp)
else:
this_img = X[tile_row * tile_shape[1] + tile_col].reshape(img_shape)
# add the slice to the corresponding position in the
# output array
out_array[
tile_row * (H+Hs): tile_row * (H + Hs) + H,
tile_col * (W+Ws): tile_col * (W + Ws) + W
] \
= this_img * (255 if output_pixel_vals else 1)
return out_array
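# A quick sketch of tile_raster_images on synthetic data (shapes and file name
# are arbitrary and purely illustrative): 100 random 8x8 "images" tiled into a
# 10x10 grid and saved with PIL.
def _tile_demo(file_name='tile_demo.png'):
    X = np.random.rand(100, 64)
    img = tile_raster_images(X=X, img_shape=(8, 8), tile_shape=(10, 10),
                             tile_spacing=(1, 1))
    PIL.Image.fromarray(img).save(file_name)
    return img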
def visualize(EN, proto_key, layer_num, file_name):
W = EN.proto_nets[proto_key][layer_num].W.get_value(borrow=True).T
size = int(np.sqrt(W.shape[1]))
# hist(W.flatten(),bins=50)
image = PIL.Image.fromarray(tile_raster_images(X=W, \
img_shape=(size, size), tile_shape=(10,W.shape[0]/10),tile_spacing=(1, 1)))
image.save(file_name)
return
def visualize_net_layer(net_layer, file_name, colorImg=False, \
use_transpose=False, transform=None):
W = net_layer.W.get_value(borrow=False).T
if use_transpose:
W = net_layer.W.get_value(borrow=False)
if not (transform is None):
W = transform(W)
if colorImg:
size = int(np.sqrt(W.shape[1] / 3.0))
else:
size = int(np.sqrt(W.shape[1]))
num_rows = 10
num_cols = int((W.shape[0] / num_rows) + 0.999)
img_shape = (size, size)
tile_shape = (num_rows, num_cols)
image = tile_raster_images(X=W, img_shape=img_shape, tile_shape=tile_shape, \
tile_spacing=(1, 1), scale=True, colorImg=colorImg)
image = PIL.Image.fromarray(image)
image.save(file_name)
return
def visualize_samples(X_samp, file_name, num_rows=10):
d = int(np.sqrt(X_samp.shape[1]))
# hist(W.flatten(),bins=50)
image = PIL.Image.fromarray(tile_raster_images(X=X_samp, img_shape=(d, d), \
tile_shape=(num_rows,X_samp.shape[0]/num_rows),tile_spacing=(1, 1)))
image.save(file_name)
return
# Matrix to image
def mat_to_img(X, file_name, img_shape, num_rows=10, \
scale=True, colorImg=False, tile_spacing=(1,1)):
num_rows = int(num_rows)
num_cols = int((X.shape[0] / num_rows) + 0.999)
tile_shape = (num_rows, num_cols)
# make a tiled image from the given matrix's rows
image = tile_raster_images(X=X, img_shape=img_shape, \
tile_shape=tile_shape, tile_spacing=tile_spacing, \
scale=scale, colorImg=colorImg)
# convert to a standard image format and save to disk
image = PIL.Image.fromarray(image)
image.save(file_name)
return
def plot_kde_histogram(X, f_name, bins=25):
"""
Plot KDE-smoothed histogram of the data in X. Assume data is univariate.
"""
import matplotlib.pyplot as plt
X_samp = X.ravel()[:,np.newaxis]
X_min = np.min(X_samp)
X_max = np.max(X_samp)
X_range = X_max - X_min
sigma = X_range / float(bins)
plot_min = X_min - (X_range/3.0)
plot_max = X_max + (X_range/3.0)
plot_X = np.linspace(plot_min, plot_max, 1000)[:,np.newaxis]
# make a kernel density estimator for the data in X
kde = KernelDensity(kernel='gaussian', bandwidth=sigma).fit(X_samp)
# make a figure
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(plot_X, np.exp(kde.score_samples(plot_X)))
fig.savefig(f_name, dpi=None, facecolor='w', edgecolor='w', \
orientation='portrait', papertype=None, format=None, \
transparent=False, bbox_inches=None, pad_inches=0.1, \
frameon=None)
plt.close(fig)
return
def plot_kde_histogram2(X1, X2, f_name, bins=25):
"""
Plot KDE-smoothed histogram of the data in X1/X2. Assume data is 1D.
"""
import matplotlib.pyplot as plt
# make a figure and configure an axis
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hold(True)
for (X, style) in [(X1, '-'), (X2, '--')]:
X_samp = X.ravel()[:,np.newaxis]
X_min = np.min(X_samp)
X_max = np.max(X_samp)
X_range = X_max - X_min
sigma = X_range / float(bins)
plot_min = X_min - (X_range/3.0)
plot_max = X_max + (X_range/3.0)
plot_X = np.linspace(plot_min, plot_max, 1000)[:,np.newaxis]
# make a kernel density estimator for the data in X
kde = KernelDensity(kernel='gaussian', bandwidth=sigma).fit(X_samp)
ax.plot(plot_X, np.exp(kde.score_samples(plot_X)), linestyle=style)
fig.savefig(f_name, dpi=None, facecolor='w', edgecolor='w', \
orientation='portrait', papertype=None, format=None, \
transparent=False, bbox_inches=None, pad_inches=0.1, \
frameon=None)
plt.close(fig)
return
def plot_stem(x, y, f_name):
"""
Plot a stem plot.
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
fig.savefig(f_name, dpi=None, facecolor='w', edgecolor='w', \
orientation='portrait', papertype=None, format=None, \
transparent=False, bbox_inches=None, pad_inches=0.1, \
frameon=None)
plt.close(fig)
return
def plot_line(x, y, f_name):
"""
Plot a line plot.
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
fig.savefig(f_name, dpi=None, facecolor='w', edgecolor='w', \
orientation='portrait', papertype=None, format=None, \
transparent=False, bbox_inches=None, pad_inches=0.1, \
frameon=None)
plt.close(fig)
return
def plot_scatter(x, y, f_name, x_label=None, y_label=None):
"""
Plot a scatter plot.
"""
import matplotlib.pyplot as plt
if x_label is None:
x_label = 'Posterior KLd'
if y_label is None:
y_label = 'Expected Log-likelihood'
fig = plt.figure()
ax = fig.add_subplot(111)
box = ax.get_position()
ax.set_position([box.x0+(0.05*box.width), box.y0+(0.05*box.height), 0.96*box.width, 0.96*box.height])
ax.set_xlabel(x_label, fontsize=22)
ax.set_ylabel(y_label, fontsize=22)
ax.hold(True)
ax.scatter(x, y, s=24, alpha=0.5, c=u'b', marker=u'o')
plt.sca(ax)
x_locs, x_labels = plt.xticks()
plt.xticks(x_locs, fontsize=18)
y_locs, y_labels = plt.yticks()
plt.yticks(y_locs, fontsize=18)
fig.savefig(f_name, dpi=None, facecolor='w', edgecolor='w', \
orientation='portrait', papertype=None, format='png', \
transparent=False, bbox_inches=None, pad_inches=0.1, \
frameon=None)
plt.close(fig)
return
|
Philip-Bachman/ICML-2015
|
utils.py
|
Python
|
mit
| 11,903
|
[
"Gaussian"
] |
444ff2f8e4b563f6de70ac5c1b970fb4b692c74eff718f83f63452c73d7747dd
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 1 11:23:01 2016
@author: camacho
"""
# Example from the george documentation page
########## GENERATE SOME INITIAL RANDOM DATA ##########
import numpy as np
import matplotlib.pyplot as pl
pl.close("all") #fecha todas as figuras anteriores
#Generate some fake noisy data.
x = 10 * np.sort(np.random.rand(20))
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))
pl.figure()
pl.plot(x,y,'*') # plot (x, y) with star markers
########## USE GAUSSIAN PROCESSES ##########
# import the george module and the kernels to use
import george
from george.kernels import ExpSquaredKernel, Matern32Kernel, CosineKernel
#Set up the Gaussian process.
kernel = 1*ExpSquaredKernel(1.0) # original kernel from the example
#kernel = CosineKernel(1.0)
gp = george.GP(kernel)
#Pre-compute the factorization of the matrix.
gp.compute(x, yerr)
cov= gp.compute(x,yerr)
#Compute the log likelihood.
print(gp.lnlikelihood(y))
print(gp.grad_lnlikelihood(y))
#like=gp.lnlikelihood(y)
#Compute the predicted values of the function at a fine grid of points
#conditioned on the observed data
t = np.linspace(0, 10, 500)
mu, cov = gp.predict(y, t) #mean mu and covariance cov
std = np.sqrt(np.diag(cov))
# Nicely dressed-up plots
pl.figure()
pl.fill_between(t, mu+std, mu-std, color="k", alpha=0.1)
pl.plot(t, mu+std, color="k", alpha=1, lw=0.25)
pl.plot(t, mu-std, color="k", alpha=1, lw=0.25)
pl.plot(t, mu, color="k", alpha=1, lw=0.5)
pl.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("$x$")
pl.ylabel("$y$")
|
jdavidrcamacho/Tests_GP
|
00 - Other examples/George_0.2.1 examples/01_Exemplo_simples.py
|
Python
|
mit
| 1,565
|
[
"Gaussian"
] |
27efaf054651a6bb8395899c731657d75e2fbead50772740240350c36505be3a
|
from django.contrib.auth.models import User
from django.utils.timezone import utc, make_aware
from django.core.mail import send_mass_mail
from django.core.management.base import BaseCommand
from django.template import Context, loader
from optparse import make_option
from datetime import datetime, timedelta
import logging
import sys
logger = logging.getLogger(__name__)
def list_option_callback(option, opt, value, parser):
setattr(parser.values, option.dest, value.split(','))
class Command(BaseCommand):
"""Note that this entire command can be replaced with scheduled celery
tasks. Instead of running this once daily, you just schedule a task for
later when somebody signs up.
"""
option_list = BaseCommand.option_list + (
make_option(
'--simulate',
action='store_true',
default=False,
help='Don\'t send any emails, just pretend.',
),
)
help = 'Sends a welcome email to the people who signed up in the last ' \
'24 hours.'
def send_emails(self, recipients):
"""Send the emails using the templates and contexts requested."""
messages = []
email_subject = 'Hi from CourtListener and Free Law Project'
email_sender = 'Brian Carver <bcarver@courtListener.com>'
txt_template = loader.get_template('emails/welcome_email.txt')
for recipient in recipients:
c = Context({'name': recipient.first_name,})
email_txt = txt_template.render(c)
messages.append((
email_subject,
email_txt,
email_sender,
[recipient.email],
))
if not self.options['simulate']:
send_mass_mail(messages, fail_silently=False)
logger.info("Sent daily welcome emails.")
else:
sys.stdout.write('Simulation mode. Imagine that we just sent the '
'welcome emails!\n')
def handle(self, *args, **options):
self.options = options
recipients = User.objects.filter(date_joined__gt=make_aware(datetime.now(), utc) - timedelta(days=1))
if recipients:
if self.options['simulate']:
sys.stdout.write("**********************************\n")
sys.stdout.write("* SIMULATE MODE - NO EMAILS SENT *\n")
sys.stdout.write("**********************************\n")
self.send_emails(recipients)
|
shashi792/courtlistener
|
alert/userHandling/management/commands/cl_welcome_new_users.py
|
Python
|
agpl-3.0
| 2,495
|
[
"Brian"
] |
b5a122f4df4edf2aaeaadc8df174d044e840305029e2a43901b53e70186d9fcf
|
import sys
from classFinder import *
from classHTMLRenderer import *
# The purpose of this code is to generate documentation for the
# text interface of Magpie. It will parse the Javadoc from Magpie
# to identify all implemented classes. Then, it assembles a website
# that tells users the name of each class, how to instantiate it,
# and what commands it can run.
#
# To Do List:
# - Be able to tell when a command was overloaded
#  - Only crash when a single file is ill-formatted
#    = Debatable: a catastrophic failure on a single bad file makes that file more likely to get fixed
#
# Authors:
# Logan Ward (ward.logan.t@gmail.com)
if len(sys.argv) != 2:
print "Write documentation of how to instantiate and use Magpie variables"
print "Usage: %s <javadoc dir>"%(sys.argv[0])
sys.exit()
docDir=sys.argv[1]
# Look for all classes
lib=ClassLibrary()
lib.findClasses(docDir, "magpie")
# Define useful functions
def printClassSummary(fp, classes):
"""
Generate a quick summary of each class in a list
:param fp Pointer to output file
:param classes List of classes
"""
# Get info
output = []
for cls in classes:
path=HTMLRenderer.pathToDocumentationFile(cls)
newLine = "<b><a href=\"" + path + "\">" + cls.package + "." + cls.name + "</a></b>"
if len(cls.usage) > 1:
newLine += ": Usage: " + cls.usage
output.append(newLine)
output.sort()
# Print it
started = False
for line in output:
toPrint = ""
if not started: started = True
else: toPrint += "</br>"
toPrint += line
print >>fp, toPrint
## Print header
fp = open("variables.html", "w")
print >>fp, "<html>"
print >>fp, "<head>"
print >>fp, "\t<title>Magpie Variable Types</title>"
print >>fp, "\t<link rel=\"stylesheet\" href=\"style.css\" type=\"text/css\" media=\"screen\" />"
print >>fp, "</head>"
## Print introduction
print >>fp, "<body>"
print >>fp, "<div id=\"wrapper\">"
print >>fp, "<div class=\"footer\">"
print >>fp, "\t<center><a href=\"index.html\">Manual Home</a></center>"
print >>fp, "</div>"
print >>fp, "<center><h1>Variable Types</h1></center>"
print >>fp, "<p>Magpie comes equipped with many different kinds of datasets, models, crystal structure prediction algorithms, and other kinds of variables. This section includes all of the currently available variable types and links to pages that describe what operations they support. If you are not yet familiar with how to call these operations, please consult the <a href=\"text-interface.html\">documentation for the text interface</a>.</p>"
## Print dataset classes
print >>fp, "<h2>Datasets</h2>"
print >>fp, "<p>Each of these dataset objects can be used to represent different kinds of data, both in terms of"
print >>fp, " how Magpie represents an entry internally and what kind of attributes it can generate.</p>"
classes = lib.getCompleteSubclasses("Dataset")
printClassSummary(fp, classes)
for cls in classes:
HTMLRenderer.writeDocumentationFile(docDir, cls, lib)
## Print model classes
print >>fp, "<h2>Models</h2>"
print >>fp, "<p>Magpie is equipped with the ability to generate many different kinds of models. This includes "
print >>fp, "models for classifying data into known subsets or predicting the value of some property.</p>"
classes = lib.getCompleteSubclasses("BaseModel")
print >>fp, "<h3>Classification Models</h3>"
print >>fp, "<p>Classifiers are used decide which group an entry belongs out of a finite list of options.</p>"
subClasses = [ x for x in classes if "classifi" in x.package ]
printClassSummary(fp, subClasses)
for cls in subClasses:
HTMLRenderer.writeDocumentationFile(docDir, cls, lib)
print >>fp, "<h3>Regression Models</h3>"
print >>fp, "<p>Regression models are used to approximate unknown, continuous"
print >>fp, " functions (think y = f(x) = a + b * x).</p>"
subClasses = [ x for x in classes if "regression" in x.package ]
printClassSummary(fp, subClasses)
for cls in subClasses:
HTMLRenderer.writeDocumentationFile(docDir, cls, lib)
## Print statistics classes
print >>fp, "<h2>Statistics Calculators</h2>"
print >>fp, "<p>Each of these objects can be used calculate different statistics about the performance of a model.</p>"
classes = lib.getCompleteSubclasses("BaseStatistics")
printClassSummary(fp, classes)
for cls in classes:
HTMLRenderer.writeDocumentationFile(docDir, cls, lib)
## Print clusterer classes
print >>fp, "<h2>Clusterers</h2>"
print >>fp, "<p>Clustering algorithms perform unsupervised learning, which recognizes "
print >>fp, "groups of data with similar attributes and provides rules for how to distinguish between them. "
print >>fp, "These groups <i>are not</i> known beforehand, use classification algorithms to build rules for "
print >>fp, "separating data into already-known groups.</p>"
classes = lib.getCompleteSubclasses("BaseClusterer")
printClassSummary(fp, classes)
for cls in classes:
HTMLRenderer.writeDocumentationFile(docDir, cls, lib)
## Print Optimization Classes
print >>fp, "<h2>Optimizers</h2>"
print >>fp, "<p>Optimization algorithms are designed to quickly locate optimal candidates out of a large space "
print >>fp, "of possibilities.</p>"
classes = lib.getCompleteSubclasses("BaseOptimizer")
printClassSummary(fp, classes)
for cls in classes:
HTMLRenderer.writeDocumentationFile(docDir, cls, lib)
## Print Crystal Structure Prediction Classes
print >>fp, "<h2>Crystal Structure Predictors</h2>"
print >>fp, "<p>Crystal structure prediction algorithms are used to predict which crystal structure "
print >>fp, "is most probable out of a list of known prototypes to be stable at a certain composition.</p>"
classes = lib.getCompleteSubclasses("CSPEngine")
printClassSummary(fp, classes)
for cls in classes:
HTMLRenderer.writeDocumentationFile(docDir, cls, lib)
## Close up shop
print >>fp, "</div>\n</body>"
print >>fp, "</html>"
fp.close()
|
amarkrishna/demo1
|
doc/generate-command-autodoc.py
|
Python
|
mit
| 5,845
|
[
"CRYSTAL"
] |
c4cbff85f4ee1ed349d4658380fcd7278aab45b69361c9ce778c493aa6ea1ba1
|
#!/usr/bin/env python
"""Copyright 2008 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__ = 'Adam Stelmack'
__version__ = '2.1.4'
__date__ = 'May 02 2008'
#Basic imports
from threading import *
from ctypes import *
import sys
#Phidget specific imports
from Phidgets.PhidgetException import *
from Phidgets.Events.Events import *
from Phidgets.Devices.Accelerometer import *
#Create an accelerometer object
accelerometer = Accelerometer()
#Information Display Function
def DisplayDeviceInfo():
print "|------------|----------------------------------|--------------|------------|"
print "|- Attached -|- Type -|- Serial No. -|- Version -|"
print "|------------|----------------------------------|--------------|------------|"
print "|- %8s -|- %30s -|- %10d -|- %8d -|" % (accelerometer.isAttached(), accelerometer.getDeviceType(), accelerometer.getSerialNum(), accelerometer.getDeviceVersion())
print "|------------|----------------------------------|--------------|------------|"
print "Number of Axes: %i" % (accelerometer.getAxisCount())
return 0
#Event Handler Callback Functions
def AccelerometerAttached(e):
attached = e.device
print "Accelerometer %i Attached!" % (attached.getSerialNum())
return 0
def AccelerometerDetached(e):
detached = e.device
print "Accelerometer %i Detached!" % (detached.getSerialNum())
return 0
def AccelerometerError(e):
print "Phidget Error %i: %s" % (e.eCode, e.description)
return 0
def AccelerometerAccelerationChanged(e):
print "Axis %i: %6f" % (e.index, e.acceleration)
return 0
#Main Program Code
try:
accelerometer.setOnAttachHandler(AccelerometerAttached)
accelerometer.setOnDetachHandler(AccelerometerDetached)
accelerometer.setOnErrorhandler(AccelerometerError)
accelerometer.setOnAccelerationChangeHandler(AccelerometerAccelerationChanged)
except PhidgetException, e:
print "Phidget Exception %i: %s" % (e.code, e.message)
print "Exiting...."
exit(1)
print "Opening phidget object...."
try:
accelerometer.openPhidget()
except PhidgetException, e:
print "Phidget Exception %i: %s" % (e.code, e.message)
print "Exiting...."
exit(1)
print "Waiting for attach...."
try:
accelerometer.waitForAttach(10000)
except PhidgetException, e:
print "Phidget Exception %i: %s" % (e.code, e.message)
try:
accelerometer.closePhidget()
except PhidgetException, e:
print "Phidget Exception %i: %s" % (e.code, e.message)
print "Exiting...."
exit(1)
print "Exiting...."
exit(1)
else:
try:
numAxis = accelerometer.getAxisCount()
accelerometer.setAccelChangeTrigger(0, 0.500)
accelerometer.setAccelChangeTrigger(1, 0.500)
if numAxis > 2:
accelerometer.setAccelChangeTrigger(2, 0.500)
except PhidgetException, e:
print "Phidget Exception %i: %s" % (e.code, e.message)
DisplayDeviceInfo()
print "Press Enter to quit...."
chr = sys.stdin.read(1)
print "Closing..."
try:
accelerometer.closePhidget()
except PhidgetException, e:
print "Phidget Exception %i: %s" % (e.code, e.message)
print "Exiting...."
exit(1)
print "Done."
exit(0)
|
jantman/tuxostat
|
fs_backup/home/tuxostat/devel/Python/Accelerometer-simple.py
|
Python
|
gpl-3.0
| 3,507
|
[
"VisIt"
] |
2e8aeee4ed99a3c31594aebaa2abd5747e6795121f70302a8dcd42c74ec8b926
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements various equation of states.
Note: Most of the code was initially adapted from ASE and deltafactor by
@gmatteo but has since undergone major refactoring.
"""
import logging
import warnings
from abc import ABCMeta, abstractmethod
from copy import deepcopy
import numpy as np
from scipy.optimize import leastsq, minimize
from pymatgen.core.units import FloatWithUnit
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt, pretty_plot
__author__ = "Kiran Mathew, gmatteo"
__credits__ = "Cormac Toher"
logger = logging.getLogger(__file__)
class EOSBase(metaclass=ABCMeta):
"""
    Abstract class that must be subclassed by all equation of state
implementations.
"""
def __init__(self, volumes, energies):
"""
Args:
volumes (list/numpy.array): volumes in Ang^3
energies (list/numpy.array): energy in eV
"""
self.volumes = np.array(volumes)
self.energies = np.array(energies)
        # minimum energy (e0), bulk modulus (b0),
# derivative of bulk modulus wrt pressure(b1), minimum volume(v0)
self._params = None
# the eos function parameters. It is the same as _params except for
        # equations of state that use polynomial fits (deltafactor and
# numerical_eos)
self.eos_params = None
def _initial_guess(self):
"""
Quadratic fit to get an initial guess for the parameters.
Returns:
tuple: (e0, b0, b1, v0)
"""
a, b, c = np.polyfit(self.volumes, self.energies, 2)
self.eos_params = [a, b, c]
v0 = -b / (2 * a)
e0 = a * (v0**2) + b * v0 + c
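        # bulk modulus: B0 = V * d2E/dV2 at V0; for E = a*V**2 + b*V + c this gives 2*a*v0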
b0 = 2 * a * v0
b1 = 4 # b1 is usually a small number like 4
vmin, vmax = min(self.volumes), max(self.volumes)
        if not vmin < v0 < vmax:
raise EOSError("The minimum volume of a fitted parabola is not in the input volumes\n.")
return e0, b0, b1, v0
def fit(self):
"""
Do the fitting. Does least square fitting. If you want to use custom
fitting, must override this.
"""
# the objective function that will be minimized in the least square
# fitting
self._params = self._initial_guess()
self.eos_params, ierr = leastsq(
lambda pars, x, y: y - self._func(x, pars),
self._params,
args=(self.volumes, self.energies),
)
# e0, b0, b1, v0
self._params = self.eos_params
if ierr not in [1, 2, 3, 4]:
raise EOSError("Optimal parameters not found")
@abstractmethod
def _func(self, volume, params):
"""
The equation of state function. This must be implemented by all classes
that derive from this abstract class.
Args:
volume (float/numpy.array)
params (list/tuple): values for the parameters other than the
volume used by the eos.
"""
def func(self, volume):
"""
The equation of state function with the parameters other than volume set
to the ones obtained from fitting.
Args:
volume (list/numpy.array)
Returns:
numpy.array
"""
return self._func(np.array(volume), self.eos_params)
def __call__(self, volume):
"""
Args:
volume (): Volume
Returns:
Compute EOS with this volume.
"""
return self.func(volume)
@property
def e0(self):
"""
Returns the min energy.
"""
return self._params[0]
@property
def b0(self):
"""
Returns the bulk modulus.
        Note: the unit of the bulk modulus is energy/volume (eV/Ang^3 for the default inputs).
"""
return self._params[1]
@property
def b0_GPa(self):
"""
Returns the bulk modulus in GPa.
Note: This assumes that the energy and volumes are in eV and Ang^3
respectively
"""
return FloatWithUnit(self.b0, "eV ang^-3").to("GPa")
@property
def b1(self):
"""
Returns the derivative of bulk modulus wrt pressure(dimensionless)
"""
return self._params[2]
@property
def v0(self):
"""
Returns the minimum or the reference volume in Ang^3.
"""
return self._params[3]
@property
def results(self):
"""
Returns a summary dict.
Returns:
dict
"""
return dict(e0=self.e0, b0=self.b0, b1=self.b1, v0=self.v0)
def plot(self, width=8, height=None, plt=None, dpi=None, **kwargs):
"""
Plot the equation of state.
Args:
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width *
golden ratio.
plt (matplotlib.pyplot): If plt is supplied, changes will be made
to an existing plot. Otherwise, a new plot will be created.
dpi:
kwargs (dict): additional args fed to pyplot.plot.
supported keys: style, color, text, label
Returns:
Matplotlib plot object.
"""
# pylint: disable=E1307
plt = pretty_plot(width=width, height=height, plt=plt, dpi=dpi)
color = kwargs.get("color", "r")
label = kwargs.get("label", f"{self.__class__.__name__} fit")
lines = [
f"Equation of State: {self.__class__.__name__}",
f"Minimum energy = {self.e0:1.2f} eV",
f"Minimum or reference volume = {self.v0:1.2f} Ang^3",
f"Bulk modulus = {self.b0:1.2f} eV/Ang^3 = {self.b0_GPa:1.2f} GPa",
f"Derivative of bulk modulus wrt pressure = {self.b1:1.2f}",
]
text = "\n".join(lines)
text = kwargs.get("text", text)
# Plot input data.
plt.plot(self.volumes, self.energies, linestyle="None", marker="o", color=color)
# Plot eos fit.
vmin, vmax = min(self.volumes), max(self.volumes)
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
vfit = np.linspace(vmin, vmax, 100)
plt.plot(vfit, self.func(vfit), linestyle="dashed", color=color, label=label)
plt.grid(True)
plt.xlabel("Volume $\\AA^3$")
plt.ylabel("Energy (eV)")
plt.legend(loc="best", shadow=True)
# Add text with fit parameters.
plt.text(0.4, 0.5, text, transform=plt.gca().transAxes)
return plt
@add_fig_kwargs
def plot_ax(self, ax=None, fontsize=12, **kwargs):
"""
Plot the equation of state on axis `ax`
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
fontsize: Legend fontsize.
color (str): plot color.
label (str): Plot label
text (str): Legend text (options)
Returns:
Matplotlib figure object.
"""
# pylint: disable=E1307
ax, fig, plt = get_ax_fig_plt(ax=ax)
color = kwargs.get("color", "r")
label = kwargs.get("label", f"{self.__class__.__name__} fit")
lines = [
f"Equation of State: {self.__class__.__name__}",
f"Minimum energy = {self.e0:1.2f} eV",
f"Minimum or reference volume = {self.v0:1.2f} Ang^3",
f"Bulk modulus = {self.b0:1.2f} eV/Ang^3 = {self.b0_GPa:1.2f} GPa",
f"Derivative of bulk modulus wrt pressure = {self.b1:1.2f}",
]
text = "\n".join(lines)
text = kwargs.get("text", text)
# Plot input data.
ax.plot(self.volumes, self.energies, linestyle="None", marker="o", color=color)
# Plot eos fit.
vmin, vmax = min(self.volumes), max(self.volumes)
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
vfit = np.linspace(vmin, vmax, 100)
ax.plot(vfit, self.func(vfit), linestyle="dashed", color=color, label=label)
ax.grid(True)
ax.set_xlabel("Volume $\\AA^3$")
ax.set_ylabel("Energy (eV)")
ax.legend(loc="best", shadow=True)
# Add text with fit parameters.
ax.text(
0.5,
0.5,
text,
fontsize=fontsize,
horizontalalignment="center",
verticalalignment="center",
transform=ax.transAxes,
)
return fig
class Murnaghan(EOSBase):
"""
Murnaghan EOS.
"""
def _func(self, volume, params):
"""
From PRB 28,5480 (1983)
"""
e0, b0, b1, v0 = tuple(params)
return e0 + b0 * volume / b1 * (((v0 / volume) ** b1) / (b1 - 1.0) + 1.0) - v0 * b0 / (b1 - 1.0)
class Birch(EOSBase):
"""
Birch EOS.
"""
def _func(self, volume, params):
"""
From Intermetallic compounds: Principles and Practice, Vol. I:
Principles Chapter 9 pages 195-210 by M. Mehl. B. Klein,
D. Papaconstantopoulos.
case where n=0
"""
e0, b0, b1, v0 = tuple(params)
return (
e0
+ 9.0 / 8.0 * b0 * v0 * ((v0 / volume) ** (2.0 / 3.0) - 1.0) ** 2
+ 9.0 / 16.0 * b0 * v0 * (b1 - 4.0) * ((v0 / volume) ** (2.0 / 3.0) - 1.0) ** 3
)
class BirchMurnaghan(EOSBase):
"""
BirchMurnaghan EOS
"""
def _func(self, volume, params):
"""
BirchMurnaghan equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (v0 / volume) ** (1.0 / 3.0)
return e0 + 9.0 * b0 * v0 / 16.0 * (eta**2 - 1) ** 2 * (6 + b1 * (eta**2 - 1.0) - 4.0 * eta**2)
class PourierTarantola(EOSBase):
"""
PourierTarantola EOS
"""
def _func(self, volume, params):
"""
Pourier-Tarantola equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1.0 / 3.0)
squiggle = -3.0 * np.log(eta)
return e0 + b0 * v0 * squiggle**2 / 6.0 * (3.0 + squiggle * (b1 - 2))
class Vinet(EOSBase):
"""
Vinet EOS.
"""
def _func(self, volume, params):
"""
Vinet equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1.0 / 3.0)
return e0 + 2.0 * b0 * v0 / (b1 - 1.0) ** 2 * (
2.0 - (5.0 + 3.0 * b1 * (eta - 1.0) - 3.0 * eta) * np.exp(-3.0 * (b1 - 1.0) * (eta - 1.0) / 2.0)
)
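# A minimal, self-contained sketch (synthetic data; the volumes and energies
# below are made up purely for illustration): fit one of the analytic forms
# defined above and read the fitted parameters back out.
def _eos_fit_demo():
    volumes = np.linspace(55, 70, 10)
    # fake parabolic E(V) data centred near v0 ~ 62 Ang^3
    energies = 0.01 * (volumes - 62.0) ** 2 - 10.0
    eos = BirchMurnaghan(volumes, energies)
    eos.fit()
    return eos.results  # dict with e0, b0, b1, v0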
class PolynomialEOS(EOSBase):
"""
Derives from EOSBase. Polynomial based equations of states must subclass
this.
"""
def _func(self, volume, params):
return np.poly1d(list(params))(volume)
def fit(self, order):
"""
Do polynomial fitting and set the parameters. Uses numpy polyfit.
Args:
order (int): order of the fit polynomial
"""
self.eos_params = np.polyfit(self.volumes, self.energies, order)
self._set_params()
def _set_params(self):
"""
Use the fit polynomial to compute the parameter e0, b0, b1 and v0
and set to the _params attribute.
"""
fit_poly = np.poly1d(self.eos_params)
# the volume at min energy, used as the initial guess for the
# optimization wrt volume.
v_e_min = self.volumes[np.argmin(self.energies)]
# evaluate e0, v0, b0 and b1
min_wrt_v = minimize(fit_poly, v_e_min)
e0, v0 = min_wrt_v.fun, min_wrt_v.x[0]
pderiv2 = np.polyder(fit_poly, 2)
pderiv3 = np.polyder(fit_poly, 3)
b0 = v0 * np.poly1d(pderiv2)(v0)
db0dv = np.poly1d(pderiv2)(v0) + v0 * np.poly1d(pderiv3)(v0)
# db/dp
b1 = -v0 * db0dv / b0
self._params = [e0, b0, b1, v0]
class DeltaFactor(PolynomialEOS):
"""
Fitting a polynomial EOS using delta factor.
"""
def _func(self, volume, params):
x = volume ** (-2.0 / 3.0)
return np.poly1d(list(params))(x)
def fit(self, order=3):
"""
        Overridden since this eos works with volume**(-2/3) instead of volume.
"""
x = self.volumes ** (-2.0 / 3.0)
self.eos_params = np.polyfit(x, self.energies, order)
self._set_params()
def _set_params(self):
"""
        Overridden to account for the fact that the fit is done with
        volume**(-2/3) instead of volume.
"""
deriv0 = np.poly1d(self.eos_params)
deriv1 = np.polyder(deriv0, 1)
deriv2 = np.polyder(deriv1, 1)
deriv3 = np.polyder(deriv2, 1)
for x in np.roots(deriv1):
if x > 0 and deriv2(x) > 0:
v0 = x ** (-3.0 / 2.0)
break
else:
raise EOSError("No minimum could be found")
derivV2 = 4.0 / 9.0 * x**5.0 * deriv2(x)
derivV3 = -20.0 / 9.0 * x ** (13.0 / 2.0) * deriv2(x) - 8.0 / 27.0 * x ** (15.0 / 2.0) * deriv3(x)
b0 = derivV2 / x ** (3.0 / 2.0)
b1 = -1 - x ** (-3.0 / 2.0) * derivV3 / derivV2
# e0, b0, b1, v0
self._params = [deriv0(v0 ** (-2.0 / 3.0)), b0, b1, v0]
class NumericalEOS(PolynomialEOS):
"""
A numerical EOS.
"""
def fit(self, min_ndata_factor=3, max_poly_order_factor=5, min_poly_order=2):
"""
Fit the input data to the 'numerical eos', the equation of state employed
in the quasiharmonic Debye model described in the paper:
10.1103/PhysRevB.90.174107.
credits: Cormac Toher
Args:
min_ndata_factor (int): parameter that controls the minimum number
of data points that will be used for fitting.
minimum number of data points =
total data points-2*min_ndata_factor
max_poly_order_factor (int): parameter that limits the max order
of the polynomial used for fitting.
max_poly_order = number of data points used for fitting -
max_poly_order_factor
min_poly_order (int): minimum order of the polynomial to be
considered for fitting.
"""
warnings.simplefilter("ignore", np.RankWarning)
def get_rms(x, y):
return np.sqrt(np.sum((np.array(x) - np.array(y)) ** 2) / len(x))
# list of (energy, volume) tuples
e_v = list(zip(self.energies, self.volumes))
ndata = len(e_v)
# minimum number of data points used for fitting
ndata_min = max(ndata - 2 * min_ndata_factor, min_poly_order + 1)
rms_min = np.inf
# number of data points available for fit in each iteration
ndata_fit = ndata
# store the fit polynomial coefficients and the rms in a dict,
# where the key=(polynomial order, number of data points used for
# fitting)
all_coeffs = {}
# sort by energy
e_v = sorted(e_v, key=lambda x: x[0])
# minimum energy tuple
e_min = e_v[0]
# sort by volume
e_v = sorted(e_v, key=lambda x: x[1])
# index of minimum energy tuple in the volume sorted list
emin_idx = e_v.index(e_min)
# the volume lower than the volume corresponding to minimum energy
v_before = e_v[emin_idx - 1][1]
# the volume higher than the volume corresponding to minimum energy
v_after = e_v[emin_idx + 1][1]
e_v_work = deepcopy(e_v)
# loop over the data points.
while (ndata_fit >= ndata_min) and (e_min in e_v_work):
max_poly_order = ndata_fit - max_poly_order_factor
e = [ei[0] for ei in e_v_work]
v = [ei[1] for ei in e_v_work]
# loop over polynomial order
for i in range(min_poly_order, max_poly_order + 1):
coeffs = np.polyfit(v, e, i)
pder = np.polyder(coeffs)
a = np.poly1d(pder)(v_before)
b = np.poly1d(pder)(v_after)
if a * b < 0:
rms = get_rms(e, np.poly1d(coeffs)(v))
rms_min = min(rms_min, rms * i / ndata_fit)
all_coeffs[(i, ndata_fit)] = [coeffs.tolist(), rms]
# store the fit coefficients small to large,
# i.e a0, a1, .. an
all_coeffs[(i, ndata_fit)][0].reverse()
# remove 1 data point from each end.
e_v_work.pop()
e_v_work.pop(0)
ndata_fit = len(e_v_work)
logger.info(f"total number of polynomials: {len(all_coeffs)}")
norm = 0.0
fit_poly_order = ndata
# weight average polynomial coefficients.
weighted_avg_coeffs = np.zeros((fit_poly_order,))
# combine all the filtered polynomial candidates to get the final fit.
for k, v in all_coeffs.items():
# weighted rms = rms * polynomial order / rms_min / ndata_fit
weighted_rms = v[1] * k[0] / rms_min / k[1]
weight = np.exp(-(weighted_rms**2))
norm += weight
coeffs = np.array(v[0])
# pad the coefficient array with zeros
coeffs = np.lib.pad(coeffs, (0, max(fit_poly_order - len(coeffs), 0)), "constant")
weighted_avg_coeffs += weight * coeffs
# normalization
weighted_avg_coeffs /= norm
weighted_avg_coeffs = weighted_avg_coeffs.tolist()
# large to small(an, an-1, ..., a1, a0) as expected by np.poly1d
weighted_avg_coeffs.reverse()
self.eos_params = weighted_avg_coeffs
self._set_params()
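        # Illustrative numbers (not from the source): a candidate fit with
        # rms = 1.5e-4, polynomial order 3 and 9 data points, given
        # rms_min = 5e-5, gets weighted_rms = 1.5e-4 * 3 / 5e-5 / 9 = 1.0 and
        # therefore contributes weight = exp(-1.0) ~= 0.37 to the average above.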
class EOS:
"""
Convenient wrapper. Retained in its original state to ensure backward
compatibility.
Fit equation of state for bulk systems.
The following equations are supported::
murnaghan: PRB 28, 5480 (1983)
birch: Intermetallic compounds: Principles and Practice, Vol I:
Principles. pages 195-210
birch_murnaghan: PRB 70, 224107
pourier_tarantola: PRB 70, 224107
vinet: PRB 70, 224107
deltafactor
numerical_eos: 10.1103/PhysRevB.90.174107.
Usage::
eos = EOS(eos_name='murnaghan')
eos_fit = eos.fit(volumes, energies)
eos_fit.plot()
"""
MODELS = {
"murnaghan": Murnaghan,
"birch": Birch,
"birch_murnaghan": BirchMurnaghan,
"pourier_tarantola": PourierTarantola,
"vinet": Vinet,
"deltafactor": DeltaFactor,
"numerical_eos": NumericalEOS,
}
def __init__(self, eos_name="murnaghan"):
"""
Args:
eos_name (str): Type of EOS to fit.
"""
if eos_name not in self.MODELS:
raise EOSError(
"The equation of state '{}' is not supported. "
"Please choose one from the following list: {}".format(eos_name, list(self.MODELS.keys()))
)
self._eos_name = eos_name
self.model = self.MODELS[eos_name]
def fit(self, volumes, energies):
"""
        Fit energies as a function of volumes.
Args:
volumes (list/np.array)
energies (list/np.array)
Returns:
EOSBase: EOSBase object
"""
eos_fit = self.model(np.array(volumes), np.array(energies))
eos_fit.fit()
return eos_fit
class EOSError(Exception):
"""
Error class for EOS fitting.
"""
|
materialsproject/pymatgen
|
pymatgen/analysis/eos.py
|
Python
|
mit
| 19,614
|
[
"ASE",
"pymatgen"
] |
2706c42d1ab4279d115c1b377913221f7cc919ab974d72b0e48b635d20fa3ab9
|
# -*- coding: utf-8 -*-
#
# rate_neuron_dm.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Rate neuron decision making
---------------------------
A binary decision is implemented in the form of two rate neurons
engaging in mutual inhibition.
Evidence for each decision is reflected by the mean input
experienced by the respective neuron.
The activity of each neuron is recorded using multimeter devices.
It can be observed how noise as well as the difference in evidence
affects which neuron exhibits larger activity and hence which
decision will be made.
"""
import nest
import matplotlib.pyplot as plt
import numpy
##########################################################################
# First, the function ``build_network`` is defined to build the network
# and return the handles of two decision units and the ``multimeter``
def build_network(sigma, dt):
nest.ResetKernel()
nest.SetKernelStatus({'resolution': dt, 'use_wfr': False})
Params = {'lambda': 0.1, 'sigma': sigma, 'tau': 1., 'rectify_output': True}
D1 = nest.Create('lin_rate_ipn', params=Params)
D2 = nest.Create('lin_rate_ipn', params=Params)
nest.Connect(D1, D2, 'all_to_all', {
'synapse_model': 'rate_connection_instantaneous', 'weight': -0.2})
nest.Connect(D2, D1, 'all_to_all', {
'synapse_model': 'rate_connection_instantaneous', 'weight': -0.2})
mm = nest.Create('multimeter')
mm.set(interval=dt, record_from=['rate'])
nest.Connect(mm, D1, syn_spec={'delay': dt})
nest.Connect(mm, D2, syn_spec={'delay': dt})
return D1, D2, mm
###########################################################################
# The function ``build_network`` takes the noise parameter sigma
# and the time resolution as arguments.
# First, the Kernel is reset and the ``use_wfr`` (waveform-relaxation)
# is set to false while the resolution is set to the specified value
# `dt`. Two rate neurons with linear activation functions are created
# and the handle is stored in the variables `D1` and `D2`. The output
# of both decision units is rectified at zero. The two decision
# units are coupled via mutual inhibition. Next, the multimeter is
# created and the handle stored in mm and the option ``record_from``
# is set. The multimeter is then connected to the two units in order
# to 'observe' them. The ``Connect`` function takes the handles as
# input.
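###########################################################################
# As a minimal illustration (values chosen here for exposition only), a
# single noisy decision circuit could be obtained with
#
#     D1, D2, mm = build_network(sigma=0.2, dt=1e-3)
#
# Below, the same call is made inside a loop over noise levels and
# evidence differences.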
###########################################################################
# The decision making process is simulated for three different levels
# of noise and three differences in evidence for a given decision. The
# activity of both decision units is plotted for each scenario.
fig_size = [14, 8]
fig_rows = 3
fig_cols = 3
fig_plots = fig_rows * fig_cols
face = 'white'
edge = 'white'
ax = [None] * fig_plots
fig = plt.figure(facecolor=face, edgecolor=edge, figsize=fig_size)
dt = 1e-3
sigma = [0.0, 0.1, 0.2]
dE = [0.0, 0.004, 0.008]
T = numpy.linspace(0, 200, int(200/dt) - 1)
for i in range(9):
c = i % 3
r = int(i / 3)
D1, D2, mm = build_network(sigma[r], dt)
###########################################################################
# First, using build_network the network is built and the handles of
# the decision units and the multimeter are stored in `D1`, `D2` and `mm`
nest.Simulate(100.0)
D1.mu = 1. + dE[c]
D2.mu = 1. - dE[c]
nest.Simulate(100.0)
########################################################################
# The network is simulated using ``Simulate``, which takes the desired
# simulation time in milliseconds and advances the network state by
# this amount of time. After an initial period in the absence of evidence
# for either decision, evidence is given by changing the mean input ``mu``
# of each decision unit.
senders = mm.get('events', 'senders')
voltages = mm.get('events', 'rate')
########################################################################
# The activity values ('voltages') are read out by the multimeter
ax[i] = fig.add_subplot(fig_rows, fig_cols, i + 1)
ax[i].plot(T, voltages[numpy.where(senders == D1.global_id)],
'b', linewidth=2, label="D1")
ax[i].plot(T, voltages[numpy.where(senders == D2.global_id)],
'r', linewidth=2, label="D2")
ax[i].set_ylim([-.5, 12.])
ax[i].get_xaxis().set_ticks([])
ax[i].get_yaxis().set_ticks([])
if c == 0:
ax[i].set_ylabel(r"activity ($\sigma=%.1f$) " % (sigma[r]))
ax[i].get_yaxis().set_ticks([0, 3, 6, 9, 12])
if r == 0:
ax[i].set_title(r"$\Delta E=%.3f$ " % (dE[c]))
if c == 2:
plt.legend(loc=0)
if r == 2:
ax[i].get_xaxis().set_ticks([0, 50, 100, 150, 200])
ax[i].set_xlabel('time (ms)')
########################################################################
# The activity of the two units is plotted in each scenario.
#
# In the absence of noise, the network will not make a decision if evidence
# for both choices is equal. With noise, this symmetry can be broken and a
# decision will be taken despite identical evidence.
#
# As evidence for `D1` relative to `D2` increases, it becomes more likely that
# the corresponding decision will be taken. For small differences in the
# evidence for the two decisions, noise can lead to the 'wrong' decision.
plt.show()
|
lekshmideepu/nest-simulator
|
pynest/examples/rate_neuron_dm.py
|
Python
|
gpl-2.0
| 5,947
|
[
"NEURON"
] |
4fa3fef01071e39cf4cd0095d17e32a6ad497f5b15eecbbe927d43068e963851
|
from smashbox.utilities import *
# utilities to create and process self-describing hashfiles
# a hashfile encodes its content checksum in its name
# the name of a hashfile may be specified using a template string (filemask) where {md5} string represents the content checksum
# for example: "test_{md5}.dat"
# hashfile size may be specified as
# - number of bytes (int)
# - a gaussian distribution (mean,sigma)
config.hashfile_size = (3.5,1.37) # standard file distribution: 10^(3.5) Bytes
config.hashfile_bigsize = (5,1.37) # big file distribution
#if defined, the file size distribution will be cut off at hashfile_maxsize
config.hashfile_maxsize = 5*1000*1000*1000
# these are ignored files which are normally not synced
config.ignored_files = ['.csync_journal.db', '.csync_journal.db-wal', '.csync_journal.db-shm']
# control memory usage of functions reading/generating files
BLOCK_SIZE = 1024*1024
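# For example (illustrative numbers only): size2nbytes((3.5, 1.37)), defined below,
# draws an exponent from gauss(3.5, 1.37) and returns int(10**exponent) bytes, so the
# median size for the standard distribution above is roughly 10**3.5 ~ 3162 bytes,
# while size2nbytes(4096) simply returns 4096.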
import os
import fnmatch
def get_files(wdir, filemask=None):
fl = os.listdir(wdir)
# if filemask defined then filter names out accordingly
if filemask:
fl = fnmatch.filter(fl, filemask.replace('{md5}', '*'))
fl = set(fl) - set(config.ignored_files)
return fl
def count_files(wdir, filemask=None):
fl = get_files(wdir, filemask)
nf = len(fl)
logger.info('%s: %d files found', wdir, nf)
return nf
def size2nbytes(size):
""" Return the number of bytes from the size specification (size may be a distribution or nbytes directly).
"""
import random, math
def make_distrib(size):
xxx = random.gauss(size[0],size[1])
nbytes = int(math.pow(10,xxx))
if nbytes<10:nbytes=10
if config.hashfile_maxsize and nbytes>config.hashfile_maxsize:
nbytes=config.hashfile_maxsize
return nbytes
try:
return int(size)
except TypeError:
return make_distrib(size)
except ValueError:
return make_distrib(size)
def create_hashfile(wdir,filemask=None,size=None,bs=None,slow_write=None):
""" Create a random file in wdir.The md5 checksum is placed in the filname name according to filemask: {md5} string in the filemask is replaced by the file checksum.
By default the filemask == {md5} so the filename consists of only the checksum.
The function will use max BLOCK_SIZE memory. Below BLOCK_SIZE the file is fully random. For larger files the BLOCK_SIZE bytes are replicated.
The default BLOCK_SIZE may be changed with the bs argument.
Optional slow_write may specify the delay in seconds between writing blocks.
    Return the name of the created file.
"""
return create_hashfile2(wdir,filemask,size,bs,slow_write)[0]
def create_hashfile2(wdir,filemask=None,size=None,bs=None,slow_write=None):
""" Same as create_hashfile but return (filename,md5sum).
"""
import hashlib
import random
if size is None:
size = config.hashfile_size
nbytes = size2nbytes(size)
if not bs:
bs = BLOCK_SIZE
nblocks = nbytes/bs
nr = nbytes%bs
assert nblocks*bs+nr==nbytes,'Chunking error!'
time.sleep(0.1)
# Prepare the building blocks
block_data = str(os.urandom(bs)) # Repeated nblocks times
block_data_r = str(os.urandom(nr)) # Only once
#block_data = str(time.time()) + 'a'*bs;
#block_data_r = 'a'*nr;
#time.sleep(0.1)
#block_data = str(time.time()) + ":".join(["%09s"%n for n in range(90000)]) #str([str(n)+':' for n in range(10000)])
#block_data_r = ""
# Precompute the checksum - we do it separately before writing the file to avoid the file rename
md5 = hashlib.md5()
for kb in range(nblocks):
md5.update(block_data)
md5.update(block_data_r)
if filemask is None:
filemask = "{md5}"
fn = os.path.join(wdir,filemask.replace('{md5}',md5.hexdigest()))
f = file(fn,'wb')
# write data blocks
for i in range(nblocks):
if slow_write:
logger.info('slow_write=%s %d %s',slow_write,i,fn)
time.sleep(slow_write)
f.write(block_data)
if slow_write:
f.flush()
f.write(block_data_r)
f.close()
logger.info("Written hash file %s, nbytes=%d",fn,nbytes)
return fn,md5.hexdigest()
def analyse_hashfiles(wdir,filemask=None):
""" Analyse files in wdir for md5 correctness.
If filemask is not provided, analyze all possible files found in wdir.
If filemask is provided, analyze only the files which match the filemask pattern ('{md5}' gets replaced by '*')
"""
import glob
import hashlib
ncorrupt = 0
nfiles = 0
nanalysed = 0
import re
if filemask is None:
#match any names containing a block of 32 characters from hex character set
md5_regexp = '\S*([a-fA-F0-9]{32,32})\S*'
else:
# re.escape in order to allow *? in the filemask
# a block of 32 characters from hex character set comes in place of {md5} token
md5_regexp = re.escape(filemask).replace('\{md5\}','([a-fA-F0-9]{32,32})')
md5_pattern = re.compile(md5_regexp)
if filemask is None:
glob_pattern = "*"
else:
glob_pattern = filemask.replace('{md5}','*')
for fn in glob.glob(os.path.normpath(os.path.join(wdir,glob_pattern))):
if not os.path.isfile(fn):
continue # Go for files!
nfiles += 1
m = md5_pattern.match(os.path.basename(fn))
if m:
md5_name = m.group(1)
else:
continue # cannot extract md5 from filename
nanalysed += 1
md5_data = md5sum(fn)
if md5_data!=md5_name:
osize = os.path.getsize(fn)
error_check(False, 'Corrupted file? %s: md5 expected %s computed %s (observed size=%s)'%(fn,repr(md5_name),repr(md5_data),osize))
ncorrupt += 1
logger.info("Found %d files in %s: analysed %d, corrupted %d",nfiles,wdir,nanalysed,ncorrupt)
return (nfiles,nanalysed,ncorrupt)
def md5sum(fn):
import hashlib
md5 = hashlib.md5()
f = file(fn,'rb')
while True:
chunk = f.read(BLOCK_SIZE)
if not chunk: break
md5.update(chunk)
f.close()
return md5.hexdigest()
def adler32(fn):
import zlib
v = 1L
f = file(fn,'rb')
while True:
chunk = f.read(BLOCK_SIZE)
if not chunk: break
v = zlib.adler32(chunk,v)
f.close()
return '%x' % (v & 0xffffffffL)
# TO BE REVIEWED...
def create_hashfile_big(wdir,size=None):
if size is None:
size = config.hashfile_bigsize
return create_random_file(wdir,size=size)
def cleanup_dir(wdir):
import hashlib
import re
conflict=re.compile("test_conflict-\d\d\d\d\d\d\d\d-\d\d\d\d\d\d.dat") # test.dat conflict
fl = os.listdir(wdir)
for f in fl:
md50 = os.path.basename(f)
if md50=='.csync_journal.db':
continue
md51 = md5sum(wdir+'/'+f)
if md50==md51 or f=='test.dat' or conflict.match(f) or 'tobedeleted_' in f:
os.unlink(wdir+'/'+f)
return
def detect_conflict(wdir):
import glob
import hashlib
ll = glob.glob(wdir+'/test_conflict*.dat')
nl = len(ll)
if nl==0:
return 0
elif nl>1:
print '+++ Severe error %d conflict files found!' % nl
sys.exit(1)
else:
bl = os.path.basename(ll[0])
f1 = file(ll[0])
f2 = file(wdir+'/test.dat')
a1 = f1.read()
a2 = f2.read()
f1.close()
f2.close()
l1 = len(a1)
l2 = len(a2)
md51 = hashlib.md5(a1).hexdigest()
md52 = hashlib.md5(a2).hexdigest()
if (md51==md52):
print 'Conflict file identical to original'
return 1
print 'File %s size: %d, test.dat size: %d' % (bl,l1,l2)
minl = 10
if l1<minl: minl=l1
if l2<minl: minl=l2
print 'bl: %s...' % a1[:minl]
print 'test.dat: %s...' % a2[:minl]
return ll[0]
if __name__ == "__main__":
import logging
logging.basicConfig()
logger=logging.getLogger()
import smashbox.utilities
smashbox.utilities.logger = logger
mkdir("TEST-hashfile")
print create_hashfile("TEST-hashfile","mytest_{md5}.jpg",size=1000000)
print create_hashfile("TEST-hashfile","mytest_{md5}.jpg")
print create_hashfile("TEST-hashfile","mytest_{md5}.jpg")
print analyse_hashfiles("TEST-hashfile","mytest_{md5}.jpg")
print analyse_hashfiles("TEST-hashfile")
|
labkode/smashbox
|
python/smashbox/utilities/hash_files.py
|
Python
|
agpl-3.0
| 8,598
|
[
"Gaussian"
] |
967c7028a16e26dc4d7526d9acc3df0d3fd08eb7bcda37bb51b3e8cb2ce3651d
|
# -*- coding:utf-8 -*-
# Copyright (c) 2015, Galaxy Authors. All Rights Reserved
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import time
import urllib
from common import util
from common import pb2dict
from bootstrap import settings
from trace import dao
from common import http
from galaxy import sdk
from ftrace import sdk as fsdk
from galaxy import galaxy_pb2
from galaxy import agent_pb2
from galaxy import log_pb2
from galaxy import initd_pb2
from galaxy import master_pb2
from sql import sql_parser
from ftrace import query_pb2
import logging
logger = logging.getLogger("console")
def sql_decorator(func):
def sql_wrapper(request, *args, **kwds):
request.has_err = False
db = request.GET.get("db", None)
if not db:
request.has_err = True
request.err = "db is required"
return func(request, *args, **kwds)
request.db = db
sql = request.GET.get("sql", None)
if not sql:
request.has_err = True
request.err = "sql is required"
return func(request, *args, **kwds)
request.sql = urllib.unquote(sql)
limit = request.GET.get("limit", "10000")
request.limit = int(limit)
return func(request, *args, **kwds)
return sql_wrapper
def query_decorator(func):
def query_wrapper(request, *args, **kwds):
start_time = request.GET.get("start", None)
end_time = request.GET.get("end", None)
request.has_err = False
if not end_time or not start_time:
end_time = datetime.datetime.now()
start_time = end_time - datetime.timedelta(hours = 1)
request.start_time = long(time.mktime(start_time.timetuple())) * 1000000
request.end_time = long(time.mktime(end_time.timetuple())) * 1000000
else:
request.start_time = long(start_time)
request.end_time = long(end_time)
db = request.GET.get("db", None)
if not db :
request.has_err = True
request.err = "db is required"
return func(request, *args, **kwds)
request.db = db
table = request.GET.get("table", None)
if not table:
request.has_err = True
request.err = "table is required"
return func(request, *args, **kwds)
request.table = table
fields = request.GET.get("fields", None)
if not fields:
request.has_err = True
request.err = "fields is required"
return func(request, *args, **kwds)
request.fields = fields.split(",")
request.reverse = request.GET.get("reverse", None)
limit = request.GET.get("limit", "100")
request.limit = int(limit)
return func(request, *args, **kwds)
return query_wrapper
def data_filter(data, fields = []):
new_dict = {}
if fields[0] == "*":
return data
for key in data:
if key not in fields:
continue
new_dict[key] = data[key]
return new_dict
def job_event_processor(resultset, fields=[], limit=100):
if not fields:
return []
job_event = log_pb2.JobEvent()
events = []
for result in resultset:
for d in result.data_list:
job_event.ParseFromString(d)
data = pb2dict.protobuf_to_dict(job_event)
data["state"] = master_pb2.JobState.Name(data["state"])
data["level"] = log_pb2.TraceLevel.Name(job_event.level)
data["update_state"] = master_pb2.JobUpdateState.Name(data["update_state"])
events.append(data_filter(data, fields))
return events
def job_stat_processor(resultset, fields=[], limit=100):
if not fields:
return []
stats = []
job_stat = log_pb2.JobStat()
for result in resultset:
for d in result.data_list:
job_stat.ParseFromString(d)
data = util.pb2dict(job_stat)
stats.append(data_filter(data, fields))
stats = sorted(stats, key=lambda x:x["time"])
return stats
def pod_event_processor(resultset, fields=[], limit=100):
if not fields:
return []
events = []
pod_event = log_pb2.PodEvent()
for data in resultset:
for d in data.data_list:
pod_event.ParseFromString(d)
e = util.pb2dict(pod_event)
e["stage"] = galaxy_pb2.PodStage.Name(pod_event.stage)
e["level"] = log_pb2.TraceLevel.Name(pod_event.level)
if e["level"] not in ["TERROR", "TWARNING"]:
continue
e["state"] = galaxy_pb2.PodState.Name(pod_event.state)
events.append(data_filter(e, fields))
events = sorted(events, key=lambda x:x["time"])
return events[0:limit]
def task_event_processor(resultset, fields=[], limit=100):
events = []
task_event = log_pb2.TaskEvent()
for data in resultset:
for d in data.data_list:
task_event.ParseFromString(d)
e = util.pb2dict(task_event)
e["initd_port"] = e["initd_addr"].split(":")[-1]
e["stage"] = agent_pb2.TaskStage.Name(task_event.stage)
e["level"] = log_pb2.TraceLevel.Name(task_event.level)
e["state"] = galaxy_pb2.TaskState.Name(task_event.state)
e["main"] = initd_pb2.ProcessStatus.Name(task_event.main)
e["ftime"] = datetime.datetime.fromtimestamp(e['ttime']/1000000).strftime("%Y-%m-%d %H:%M:%S")
e["deploy"] = initd_pb2.ProcessStatus.Name(task_event.deploy)
events.append(data_filter(e, fields))
events = sorted(events, key=lambda x:x["ttime"], reverse = True)
return events[0:limit]
def cluster_stat_processor(resultset, fields=[], limit=100):
stats = []
cluster_stat = log_pb2.ClusterStat()
for data in resultset:
for d in data.data_list:
cluster_stat.ParseFromString(d)
stats.append(data_filter(util.pb2dict(cluster_stat), fields))
stats = sorted(stats, key=lambda x:x["time"])
return stats[0:limit]
def agent_event_processor(resultset, fields=[], limit=100):
stats = []
agent_event = log_pb2.AgentEvent()
for data in resultset:
for d in data.data_list:
agent_event.ParseFromString(d)
stats.append(data_filter(util.pb2dict(agent_event), fields))
return stats[0:limit]
PROCESSOR_MAP={
"baidu.galaxy":{
"JobEvent":job_event_processor,
"JobStat":job_stat_processor,
"PodEvent":pod_event_processor,
"TaskEvent":task_event_processor,
"ClusterStat":cluster_stat_processor,
"AgentEvent":agent_event_processor
}
}
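# Dispatch example (entries as defined above): PROCESSOR_MAP["baidu.galaxy"]["PodEvent"]
# resolves to pod_event_processor; query() and squery() below use this mapping to pick
# the per-table post-processing function.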
@query_decorator
def query(request):
builder = http.ResponseBuilder()
if request.has_err:
return builder.error(request.err).build_json()
ftrace = fsdk.FtraceSDK(settings.TRACE_QUERY_ENGINE)
id = request.GET.get("id", None)
jobid = request.GET.get("jobid", None)
podid = request.GET.get("podid", None)
resultset = []
status = False
if id :
resultset, status = ftrace.simple_query(request.db,
request.table,
id,
request.start_time,
request.end_time,
request.limit)
elif jobid:
resultset, status = ftrace.index_query(request.db,
request.table,
"jobid",
jobid,
request.start_time,
request.end_time,
request.limit)
elif podid:
resultset, status = ftrace.index_query(request.db,
request.table,
"pod_id",
podid,
request.start_time,
request.end_time,
request.limit)
if not status:
return builder.error("fail to make a query").build_json()
proc_func = PROCESSOR_MAP[request.db][request.table]
datas= proc_func(resultset, request.fields, request.limit)
return builder.ok(data = {"datas":datas}).build_json()
def index(request):
return util.render_tpl(request, {}, "index.html")
def sql_to_mdt(db, sql, limit):
operator_dict = {
"=":query_pb2.RpcEqualTo,
"<":query_pb2.RpcLess,
"<=":query_pb2.RpcLessEqual,
">":query_pb2.RpcGreater,
}
context, status = sql_parser.SimpleSqlParser().parse(sql)
if not status:
return None, None, False
logger.info(context)
request = query_pb2.RpcSearchRequest()
request.db_name = db
request.table_name = context["table"]
request.limit = limit
conds = []
has_start_time = False
has_end_time = False
for cond in context["conditions"]:
if cond[0] == "id":
request.primary_key = cond[2]
elif cond[0] == "time" and cond[1].startswith(">"):
if isinstance(cond[2], unicode) or isinstance(cond[2], str):
request.start_timestamp = long(time.mktime(time.strptime(cond[2], "%Y-%m-%d %H:%M"))) * 1000000
else:
request.start_timestamp = cond[2]
has_start_time = True
elif cond[0] == "time" and cond[1].startswith("<"):
if isinstance(cond[2], unicode) or isinstance(cond[2], str):
request.end_timestamp = long(time.mktime(time.strptime(cond[2], "%Y-%m-%d %H:%M"))) * 1000000
else:
request.end_timestamp = cond[2]
has_end_time = True
else:
condition = request.condition.add()
condition.cmp_key = cond[2]
condition.cmp = operator_dict[cond[1]]
condition.index_table_name = cond[0]
end_time = datetime.datetime.now()
start_time = end_time - datetime.timedelta(hours = 24)
if not has_start_time:
request.start_timestamp = long(time.mktime(start_time.timetuple())) * 1000000
if not has_end_time:
request.end_timestamp = long(time.mktime(end_time.timetuple())) * 1000000
return context, request, True
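# Illustrative sketch only -- the exact grammar is defined by sql_parser, not here.
# A query along the lines of
#   select jobid, state from PodEvent where jobid = "job_0" and time > "2015-08-01 10:00"
# would yield context["table"] = "PodEvent", a fields list, and (field, operator, value)
# conditions: the time condition sets request.start_timestamp, the jobid condition becomes
# an index condition, and any missing time bound defaults to the last 24 hours as above.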
def gen_tpl(fields):
tpl="""
<table class="table">
<thead>
<tr>
%(head)s
</tr>
</thead>
<tbody>
{{#datas}}
<tr>
%(body)s
</tr>
{{/datas}}
</tbody>
</table>
"""
head = ""
body = ""
for field in fields:
head += "<th>%s</th>"%field
body += "<td>{{%s}}</td>"%field
tpl = tpl%{"head":head, "body":body}
return tpl
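# For example, gen_tpl(["jobid", "state"]) produces a table whose header row is
# "<th>jobid</th><th>state</th>" and whose mustache body row is
# "<td>{{jobid}}</td><td>{{state}}</td>", repeated once per entry in {{#datas}}.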
@sql_decorator
def squery(request):
builder = http.ResponseBuilder()
if request.has_err:
return builder.error(request.err).build_json()
context, pb_req, ok = sql_to_mdt(request.db, request.sql, request.limit)
if not ok:
return builder.error("fail to parse sql").build_json()
ftrace = fsdk.FtraceSDK(settings.TRACE_QUERY_ENGINE)
resultset, ok = ftrace.make_req(pb_req)
if not ok:
return builder.error("fail to parse sql").build_json()
proc_func = PROCESSOR_MAP[request.db][context["table"]]
datas= proc_func(resultset, context["fields"], request.limit)
return builder.ok(data = {"datas":datas, "tpl":gen_tpl(context["fields"])}).build_json()
def sql(request):
return util.render_tpl(request, {},"sql.html")
def cluster(request):
return util.render_tpl(request, {},"cluster.html")
def job_stat(request):
jobid = request.GET.get("jobid", None)
reverse = request.GET.get("reverse", None)
builder = http.ResponseBuilder()
if not jobid:
return builder.error("jobid is required").build_json()
end_time = datetime.datetime.now()
start_time = end_time - datetime.timedelta(hours = 1)
trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
stats, status = trace_dao.get_job_stat(jobid,
time.mktime(start_time.timetuple()),
time.mktime(end_time.timetuple()))
if not status:
return builder.error("fail to get job stat").build_json()
if reverse:
stats = sorted(stats, key=lambda x:x["time"], reverse = True)
return builder.ok(data = {"stats":stats}).build_json()
def get_pod_event_by_jobid(request):
jobid = request.GET.get("jobid", None)
reverse = request.GET.get("reverse", None)
builder = http.ResponseBuilder()
if not jobid:
return builder.error("jobid is required").build_json()
end_time = datetime.datetime.now()
start_time = end_time - datetime.timedelta(hours = 24)
trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
events, status = trace_dao.get_pod_event_by_jobid(jobid,
time.mktime(start_time.timetuple()),
time.mktime(end_time.timetuple()),
limit=50)
if not status:
return builder.error("fail to get pod event").build_json()
filter_events = []
for e in events:
if e["level"] == "TINFO":
continue
e["ftime"] = datetime.datetime.fromtimestamp(e['time']/1000000).strftime("%Y-%m-%d %H:%M:%S")
filter_events.append(e)
filter_events = sorted(filter_events, key=lambda x:x["time"], reverse = True)
return builder.ok(data = {"events": filter_events}).build_json()
def job_event(request):
jobid = request.GET.get("jobid", None)
builder = http.ResponseBuilder()
if not jobid:
return builder.error("jobid is required").build_json()
end_time = datetime.datetime.now()
start_time = end_time - datetime.timedelta(hours = 1)
trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
events, status = trace_dao.get_job_event(jobid,
time.mktime(start_time.timetuple()),
time.mktime(end_time.timetuple()))
if not status:
return builder.error("fail to get job evnets").build_json()
events = sorted(events, key=lambda x:x["time"], reverse = True)
return builder.ok(data = {"events":events}).build_json()
def get_pod(request):
podid = request.GET.get("podid", None)
if not podid:
return util.render_tpl(request, {}, "404.html")
end_time = datetime.datetime.now()
start_time = end_time - datetime.timedelta(hours = 1)
trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
pod_events, status = trace_dao.get_pod_event(podid,
time.mktime(start_time.timetuple()),
time.mktime(end_time.timetuple()))
if not status:
return util.render_tpl(request, {"err":"fail to get trace"}, "500.html")
task_events, status = trace_dao.get_task_event(podid,
time.mktime(start_time.timetuple()),
time.mktime(end_time.timetuple()))
return util.render_tpl(request, {"podid":podid,
"pod_events":pod_events,
"task_events":task_events},
"pod_trace.html")
def pod_event(request):
podid = request.GET.get("podid", None)
builder = http.ResponseBuilder()
if not podid:
return builder.error("podid is required").build_json()
end_time = datetime.datetime.now()
start_time = end_time - datetime.timedelta(hours = 1)
trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
events, status = trace_dao.get_pod_event(podid,
time.mktime(start_time.timetuple()),
time.mktime(end_time.timetuple()))
if not status:
return builder.error("fail to get pod event").build_json()
events = sorted(events, key=lambda x:x["time"], reverse = True)
return builder.ok(data = {"events": events}).build_json()
def task_event(request):
podid = request.GET.get("podid", None)
builder = http.ResponseBuilder()
if not podid:
return builder.error("podid is required").build_json()
trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
end_time = datetime.datetime.now()
start_time = end_time - datetime.timedelta(hours = 1)
task_events, status = trace_dao.get_task_event(podid,
time.mktime(start_time.timetuple()),
time.mktime(end_time.timetuple()),
limit=20)
if not status:
return builder.error("fail to get task event").build_json()
filter_events = []
for e in task_events:
if e["level"] == "TINFO":
continue
e["ftime"] = datetime.datetime.fromtimestamp(e['ttime']/1000000).strftime("%Y-%m-%d %H:%M:%S")
filter_events.append(e)
filter_events = sorted(filter_events, key=lambda x:x["ttime"], reverse = True)
return builder.ok(data = {"events": filter_events}).build_json()
def pod_stat(request):
podid = request.GET.get("podid", None)
builder = http.ResponseBuilder()
if not podid:
return builder.error("podid is required").build_json()
end_time = datetime.datetime.now()
start_time = end_time - datetime.timedelta(hours = 1)
trace_dao = dao.TraceDao(settings.TRACE_QUERY_ENGINE)
stats, status = trace_dao.get_pod_stat(podid,
time.mktime(start_time.timetuple()),
time.mktime(end_time.timetuple()))
if not status:
return builder.error("fail to get pod stat").build_json()
stats = sorted(stats, key=lambda x:x["time"], reverse = True)
return builder.ok(data = {"stats":stats}).build_json()
def job_all(request):
galaxy = sdk.GalaxySDK(settings.GALAXY_MASTER)
jobs, status = galaxy.get_all_job()
job_dicts = []
for job in jobs:
job_dict = pb2dict.protobuf_to_dict(job)
job_dict['state'] = master_pb2.JobState.Name(job_dict['state'])
job_dicts.append(job_dict)
return util.render_tpl(request, {"jobs":job_dicts}, "index.html")
def job_detail(request):
return util.render_tpl(request, {"jobid":request.GET.get("jobid", None)},
"job.html")
def pod_detail(request):
return util.render_tpl(request, {"podid":request.GET.get("podid", None),
"time":request.GET.get("time",None)},
"pod_detail.html")
def get_real_time_status(request):
galaxy = sdk.GalaxySDK(settings.GALAXY_MASTER)
response = galaxy.get_real_time_status()
builder = http.ResponseBuilder()
status = pb2dict.protobuf_to_dict(response)
return builder.ok(data = {"status":status}).build_json()
|
bluebore/galaxy
|
platform/src/trace/views.py
|
Python
|
bsd-3-clause
| 19,257
|
[
"Galaxy"
] |
c2d2d02e749bacc25bcbcba31ea746b5f0c49ccd200727d18cf2401ad44b3838
|
import unittest
import time
import numpy as np
# Standard library imports...
from mock import patch, MagicMock, mock_open
import utils
from core.octopus.patterns.rpcTestPattern import RpcTestPattern
from core.tests.integrationTest import IntegrationTest
class TestIntegrationTest(unittest.TestCase):
    # Mock OPC connection
# TODO: DRY up setup code
def setUp(self):
patcher = patch('core.octopus.opc.Client')
opc_mock = patcher.start()
opc_mock.can_connect = MagicMock(return_value=True)
opc_mock.put_pixels = MagicMock()
patcher = patch('core.octopus.opc.Client')
opc_mock = patcher.start()
opc_mock.can_connect = MagicMock(return_value=True)
opc_mock.put_pixels = MagicMock()
# Mock open file writing
#patch('%s.open' % __name__, mock_open(), create=True).start()
#patch("__builtin__.open", mock_open(read_data="data")).start()
self.integration_test = IntegrationTest("", patterns=[RpcTestPattern()])
print_string = "".join(["\n", "Running ", self._testMethodName, "\n"])
print print_string, "*"*len(print_string)
# A single exception renders failure,
def test_fails_on_exception(self):
with patch('core.octopus.patterns.rpcTestPattern.RpcTestPattern.next_frame') as mock:
mock.side_effect = Exception("PURPOSELY BROKEN TEST PATTERN")
with self.assertRaises(Exception):
self.integration_test.run(0.1)
if __name__ == '__main__':
unittest.main()
|
TheGentlemanOctopus/thegentlemanoctopus
|
octopus_code/core/tests/integrationTestTests.py
|
Python
|
gpl-3.0
| 1,546
|
[
"Octopus"
] |
dd47e2721cf8ef16fc7e785ceed8d13f6bb98512e8a1b073bbbebbcef24494ca
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This class provides utility classes and functions for use inside permutations
scripts.
"""
import random
import numpy
from nupic.support.configuration import Configuration
class PermuteVariable(object):
"""The base class of all PermuteXXX classes that can be used from within
a permutation script."""
def __init__(self):
pass
def getState(self):
"""Return the current state of this particle. This is used for
communicating our state into a model record entry so that it can be
instantiated on another worker."""
raise NotImplementedError
def setState(self, state):
"""Set the current state of this particle. This is counterpart to getState.
"""
raise NotImplementedError
def getPosition(self):
"""for int vars, returns position to nearest int
Parameters:
--------------------------------------------------------------
retval: current position
"""
raise NotImplementedError
def agitate(self):
"""This causes the variable to jiggle away from its current position.
It does this by increasing its velocity by a multiplicative factor.
Every time agitate() is called, the velocity will increase. In this way,
you can call agitate over and over again until the variable reaches a
new position."""
raise NotImplementedError
#=========================================================================
def newPosition(self, globalBestPosition, rng):
"""Choose a new position based on results obtained so far from other
particles and the passed in globalBestPosition.
Parameters:
--------------------------------------------------------------
globalBestPosition: global best position for this colony
rng: instance of random.Random() used for generating
random numbers
retval: new position
"""
raise NotImplementedError
def pushAwayFrom(self, otherVars, rng):
"""Choose a new position that is as far away as possible from all
'otherVars', where 'otherVars' is a list of PermuteVariable instances.
Parameters:
--------------------------------------------------------------
otherVars: list of other PermuteVariables to push away from
rng: instance of random.Random() used for generating
random numbers
"""
raise NotImplementedError
def resetVelocity(self, rng):
"""Reset the velocity to be some fraction of the total distance. This
is called usually when we start a new swarm and want to start at the
previous best position found in the previous swarm but with a
velocity which is a known fraction of the total distance between min
and max.
Parameters:
--------------------------------------------------------------
rng: instance of random.Random() used for generating
random numbers
"""
raise NotImplementedError
class PermuteFloat(PermuteVariable):
"""Define a permutation variable which can take on floating point values."""
def __init__(self, min, max, stepSize=None, inertia=None, cogRate=None,
socRate=None):
"""Construct a variable that permutes over floating point values using
the Particle Swarm Optimization (PSO) algorithm. See descriptions of
PSO (i.e. http://en.wikipedia.org/wiki/Particle_swarm_optimization)
for references to the inertia, cogRate, and socRate parameters.
Parameters:
-----------------------------------------------------------------------
min: min allowed value of position
max: max allowed value of position
stepSize: if not None, the position must be at min + N * stepSize,
where N is an integer
inertia: The inertia for the particle.
cogRate: This parameter controls how much the particle is affected
by its distance from it's local best position
socRate: This parameter controls how much the particle is affected
by its distance from the global best position
"""
super(PermuteFloat, self).__init__()
self.min = min
self.max = max
self.stepSize = stepSize
# The particle's initial position and velocity.
self._position = (self.max + self.min) / 2.0
self._velocity = (self.max - self.min) / 5.0
# The inertia, cognitive, and social components of the particle
self._inertia = (float(Configuration.get("nupic.hypersearch.inertia"))
if inertia is None else inertia)
self._cogRate = (float(Configuration.get("nupic.hypersearch.cogRate"))
if cogRate is None else cogRate)
self._socRate = (float(Configuration.get("nupic.hypersearch.socRate"))
if socRate is None else socRate)
# The particle's local best position and the best global position.
self._bestPosition = self.getPosition()
self._bestResult = None
def __repr__(self):
"""See comments in base class."""
return ("PermuteFloat(min=%f, max=%f, stepSize=%s) [position=%f(%f), "
"velocity=%f, _bestPosition=%s, _bestResult=%s]" % (
self.min, self.max, self.stepSize, self.getPosition(),
self._position, self._velocity, self._bestPosition,
self._bestResult))
def getState(self):
"""See comments in base class."""
return dict(_position = self._position,
position = self.getPosition(),
velocity = self._velocity,
bestPosition = self._bestPosition,
bestResult = self._bestResult)
def setState(self, state):
"""See comments in base class."""
self._position = state['_position']
self._velocity = state['velocity']
self._bestPosition = state['bestPosition']
self._bestResult = state['bestResult']
def getPosition(self):
"""See comments in base class."""
if self.stepSize is None:
return self._position
# Find nearest step
numSteps = (self._position - self.min) / self.stepSize
numSteps = int(round(numSteps))
position = self.min + (numSteps * self.stepSize)
position = max(self.min, position)
position = min(self.max, position)
return position
def agitate(self):
"""See comments in base class."""
# Increase velocity enough that it will be higher the next time
# newPosition() is called. We know that newPosition multiplies by inertia,
# so take that into account.
self._velocity *= 1.5 / self._inertia
# Clip velocity
maxV = (self.max - self.min)/2
if self._velocity > maxV:
self._velocity = maxV
elif self._velocity < -maxV:
self._velocity = -maxV
# if we at the max or min, reverse direction
if self._position == self.max and self._velocity > 0:
self._velocity *= -1
if self._position == self.min and self._velocity < 0:
self._velocity *= -1
def newPosition(self, globalBestPosition, rng):
"""See comments in base class."""
# First, update the velocity. The new velocity is given as:
# v = (inertia * v) + (cogRate * r1 * (localBest-pos))
# + (socRate * r2 * (globalBest-pos))
#
# where r1 and r2 are random numbers between 0 and 1.0
lb=float(Configuration.get("nupic.hypersearch.randomLowerBound"))
ub=float(Configuration.get("nupic.hypersearch.randomUpperBound"))
self._velocity = (self._velocity * self._inertia + rng.uniform(lb, ub) *
self._cogRate * (self._bestPosition - self.getPosition()))
if globalBestPosition is not None:
self._velocity += rng.uniform(lb, ub) * self._socRate * (
globalBestPosition - self.getPosition())
# update position based on velocity
self._position += self._velocity
# Clip it
self._position = max(self.min, self._position)
self._position = min(self.max, self._position)
# Return it
return self.getPosition()
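  # Worked example with illustrative numbers (not the configuration defaults):
  # with inertia=0.5, velocity=1.0, cogRate=socRate=2.0, r1=0.25, r2=0.5,
  # (localBest - pos)=2.0 and (globalBest - pos)=1.0 the update above gives
  #   v = 0.5*1.0 + 2.0*0.25*2.0 + 2.0*0.5*1.0 = 2.5
  # and the new position is the old position plus this velocity, clipped to
  # [min, max].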
def pushAwayFrom(self, otherPositions, rng):
"""See comments in base class."""
# If min and max are the same, nothing to do
if self.max == self.min:
return
# How many potential other positions to evaluate?
numPositions = len(otherPositions) * 4
if numPositions == 0:
return
# Assign a weight to each potential position based on how close it is
# to other particles.
stepSize = float(self.max-self.min) / numPositions
positions = numpy.arange(self.min, self.max + stepSize, stepSize)
# Get rid of duplicates.
numPositions = len(positions)
weights = numpy.zeros(numPositions)
# Assign a weight to each potential position, based on a gaussian falloff
# from each existing variable. The weight of a variable to each potential
# position is given as:
# e ^ -(dist^2/stepSize^2)
maxDistanceSq = -1 * (stepSize ** 2)
for pos in otherPositions:
distances = pos - positions
varWeights = numpy.exp(numpy.power(distances, 2) / maxDistanceSq)
weights += varWeights
# Put this particle at the position with smallest weight.
positionIdx = weights.argmin()
self._position = positions[positionIdx]
# Set its best position to this.
self._bestPosition = self.getPosition()
# Give it a random direction.
self._velocity *= rng.choice([1, -1])
def resetVelocity(self, rng):
"""See comments in base class."""
maxVelocity = (self.max - self.min) / 5.0
self._velocity = maxVelocity #min(abs(self._velocity), maxVelocity)
self._velocity *= rng.choice([1, -1])
class PermuteInt(PermuteFloat):
"""Define a permutation variable which can take on integer values."""
def __init__(self, min, max, stepSize=1, inertia=None, cogRate=None,
socRate=None):
super(PermuteInt, self).__init__(min, max, stepSize, inertia=inertia,
cogRate=cogRate, socRate=socRate)
def __repr__(self):
"""See comments in base class."""
return ("PermuteInt(min=%d, max=%d, stepSize=%d) [position=%d(%f), "
"velocity=%f, _bestPosition=%s, _bestResult=%s]" % (
self.min, self.max, self.stepSize, self.getPosition(),
self._position, self._velocity, self._bestPosition,
self._bestResult))
def getPosition(self):
"""See comments in base class."""
position = super(PermuteInt, self).getPosition()
position = int(round(position))
return position
class PermuteChoices(PermuteVariable):
"""Define a permutation variable which can take on discrete choices."""
def __init__(self, choices, fixEarly=False):
super(PermuteChoices, self).__init__()
self.choices = choices
self._positionIdx = 0
# Keep track of the results obtained for each choice
self._resultsPerChoice = [[]] * len(self.choices)
# The particle's local best position and the best global position
self._bestPositionIdx = self._positionIdx
self._bestResult = None
# If this is true then we only return the best position for this encoder
# after all choices have been seen.
self._fixEarly = fixEarly
# Factor that affects how quickly we assymptote to simply choosing the
# choice with the best error value
self._fixEarlyFactor = .7
def __repr__(self):
"""See comments in base class."""
return "PermuteChoices(choices=%s) [position=%s]" % (self.choices,
self.choices[self._positionIdx])
def getState(self):
"""See comments in base class."""
return dict(_position = self.getPosition(),
position = self.getPosition(),
velocity = None,
bestPosition = self.choices[self._bestPositionIdx],
bestResult = self._bestResult)
def setState(self, state):
"""See comments in base class."""
self._positionIdx = self.choices.index(state['_position'])
self._bestPositionIdx = self.choices.index(state['bestPosition'])
self._bestResult = state['bestResult']
def setResultsPerChoice(self, resultsPerChoice):
"""Setup our resultsPerChoice history based on the passed in
resultsPerChoice.
For example, if this variable has the following choices:
['a', 'b', 'c']
resultsPerChoice will have up to 3 elements, each element is a tuple
containing (choiceValue, errors) where errors is the list of errors
received from models that used the specific choice:
retval:
[('a', [0.1, 0.2, 0.3]), ('b', [0.5, 0.1, 0.6]), ('c', [0.2])]
"""
# Keep track of the results obtained for each choice.
self._resultsPerChoice = [[]] * len(self.choices)
for (choiceValue, values) in resultsPerChoice:
choiceIndex = self.choices.index(choiceValue)
self._resultsPerChoice[choiceIndex] = list(values)
def getPosition(self):
"""See comments in base class."""
return self.choices[self._positionIdx]
def agitate(self):
"""See comments in base class."""
# Not sure what to do for choice variables....
# TODO: figure this out
pass
def newPosition(self, globalBestPosition, rng):
"""See comments in base class."""
# Compute the mean score per choice.
numChoices = len(self.choices)
meanScorePerChoice = []
overallSum = 0
numResults = 0
for i in range(numChoices):
if len(self._resultsPerChoice[i]) > 0:
data = numpy.array(self._resultsPerChoice[i])
meanScorePerChoice.append(data.mean())
overallSum += data.sum()
numResults += data.size
else:
meanScorePerChoice.append(None)
if numResults == 0:
overallSum = 1.0
numResults = 1
# For any choices we don't have a result for yet, set to the overall mean.
for i in range(numChoices):
if meanScorePerChoice[i] is None:
meanScorePerChoice[i] = overallSum / numResults
# Now, pick a new choice based on the above probabilities. Note that the
# best result is the lowest result. We want to make it more likely to
# pick the choice that produced the lowest results. So, we need to invert
# the scores (someLargeNumber - score).
meanScorePerChoice = numpy.array(meanScorePerChoice)
# Invert meaning.
meanScorePerChoice = (1.1 * meanScorePerChoice.max()) - meanScorePerChoice
# If you want the scores to quickly converge to the best choice, raise the
# results to a power. This will cause lower scores to become lower
# probability as you see more results, until it eventually should
# assymptote to only choosing the best choice.
if self._fixEarly:
meanScorePerChoice **= (numResults * self._fixEarlyFactor / numChoices)
# Normalize.
total = meanScorePerChoice.sum()
if total == 0:
total = 1.0
meanScorePerChoice /= total
# Get distribution and choose one based on those probabilities.
distribution = meanScorePerChoice.cumsum()
r = rng.random() * distribution[-1]
choiceIdx = numpy.where(r <= distribution)[0][0]
self._positionIdx = choiceIdx
return self.getPosition()
def pushAwayFrom(self, otherPositions, rng):
"""See comments in base class."""
# Get the count of how many in each position
positions = [self.choices.index(x) for x in otherPositions]
positionCounts = [0] * len(self.choices)
for pos in positions:
positionCounts[pos] += 1
self._positionIdx = numpy.array(positionCounts).argmin()
self._bestPositionIdx = self._positionIdx
def resetVelocity(self, rng):
"""See comments in base class."""
pass
class PermuteEncoder(PermuteVariable):
""" A permutation variable that defines a field encoder. This serves as
a container for the encoder constructor arguments.
"""
def __init__(self, fieldName, encoderClass, name=None, **kwArgs):
super(PermuteEncoder, self).__init__()
self.fieldName = fieldName
if name is None:
name = fieldName
self.name = name
self.encoderClass = encoderClass
# Possible values in kwArgs include: w, n, minval, maxval, etc.
self.kwArgs = dict(kwArgs)
def __repr__(self):
"""See comments in base class."""
suffix = ""
for key, value in self.kwArgs.items():
suffix += "%s=%s, " % (key, value)
return "PermuteEncoder(fieldName=%s, encoderClass=%s, name=%s, %s)" % (
(self.fieldName, self.encoderClass, self.name, suffix))
def getDict(self, encoderName, flattenedChosenValues):
""" Return a dict that can be used to construct this encoder. This dict
can be passed directly to the addMultipleEncoders() method of the
multi encoder.
Parameters:
----------------------------------------------------------------------
encoderName: name of the encoder
flattenedChosenValues: dict of the flattened permutation variables. Any
variables within this dict whose key starts
with encoderName will be substituted for
encoder constructor args which are being
permuted over.
"""
encoder = dict(fieldname=self.fieldName,
name=self.name)
# Get the position of each encoder argument
for encoderArg, value in self.kwArgs.iteritems():
# If a permuted variable, get its chosen value.
if isinstance(value, PermuteVariable):
value = flattenedChosenValues["%s:%s" % (encoderName, encoderArg)]
encoder[encoderArg] = value
# Special treatment for DateEncoder timeOfDay and dayOfWeek stuff. In the
# permutations file, the class can be one of:
# DateEncoder.timeOfDay
# DateEncoder.dayOfWeek
# DateEncoder.season
# If one of these, we need to intelligently set the constructor args.
if '.' in self.encoderClass:
(encoder['type'], argName) = self.encoderClass.split('.')
argValue = (encoder['w'], encoder['radius'])
encoder[argName] = argValue
encoder.pop('w')
encoder.pop('radius')
else:
encoder['type'] = self.encoderClass
return encoder
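  # Illustrative sketch of getDict() with hypothetical arguments (not from the source):
  #   pe = PermuteEncoder('consumption', 'ScalarEncoder', w=21, n=PermuteInt(50, 500))
  #   pe.getDict('consumption', {'consumption:n': 275})
  # returns
  #   {'fieldname': 'consumption', 'name': 'consumption', 'w': 21, 'n': 275,
  #    'type': 'ScalarEncoder'}
  # because permuted kwArgs are looked up under "<encoderName>:<argName>" in the
  # flattened chosen values.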
class Tests(object):
def _testValidPositions(self, varClass, minValue, maxValue, stepSize,
iterations=100):
"""Run a bunch of iterations on a PermuteVar and collect which positions
were visited. Verify that they were all valid.
"""
positions = set()
cogRate = 2.0
socRate = 2.0
inertia = None
gBestPosition = maxValue
lBestPosition = minValue
foundBestPosition = None
foundBestResult = None
rng = random.Random()
rng.seed(42)
var = varClass(min=minValue, max=maxValue, stepSize=stepSize,
inertia=inertia, cogRate=cogRate, socRate=socRate)
for _ in xrange(iterations):
pos = var.getPosition()
if self.verbosity >= 1:
print "pos: %f" % (pos),
if self.verbosity >= 2:
print var
positions.add(pos)
# Set the result so that the local best is at lBestPosition.
result = 1.0 - abs(pos - lBestPosition)
if foundBestResult is None or result > foundBestResult:
foundBestResult = result
foundBestPosition = pos
state = var.getState()
state['bestPosition'] = foundBestPosition
state['bestResult'] = foundBestResult
var.setState(state)
var.newPosition(gBestPosition, rng)
positions = sorted(positions)
print "Positions visited (%d):" % (len(positions)), positions
# Validate positions.
assert (max(positions) <= maxValue)
    assert (min(positions) >= minValue)
assert (len(positions)) <= int(round((maxValue - minValue)/stepSize)) + 1
def _testConvergence(self, varClass, minValue, maxValue, targetValue,
iterations=100):
"""Test that we can converge on the right answer."""
gBestPosition = targetValue
lBestPosition = targetValue
foundBestPosition = None
foundBestResult = None
rng = random.Random()
rng.seed(42)
var = varClass(min=minValue, max=maxValue)
for _ in xrange(iterations):
pos = var.getPosition()
if self.verbosity >= 1:
print "pos: %f" % (pos),
if self.verbosity >= 2:
print var
# Set the result so that the local best is at lBestPosition.
result = 1.0 - abs(pos - lBestPosition)
if foundBestResult is None or result > foundBestResult:
foundBestResult = result
foundBestPosition = pos
state = var.getState()
state['bestPosition'] = foundBestPosition
state['bestResult'] = foundBestResult
var.setState(state)
var.newPosition(gBestPosition, rng)
# Test that we reached the target.
print "Target: %f, Converged on: %f" % (targetValue, pos)
assert abs(pos-targetValue) < 0.001
def _testChoices(self):
pc = PermuteChoices(['0', '1', '2', '3'])
counts = [0] * 4
rng = random.Random()
rng.seed(42)
# Check the without results the choices are chosen uniformly.
for _ in range(1000):
pos = int(pc.newPosition(None, rng))
counts[pos] += 1
for count in counts:
assert count < 270 and count > 230
print "No results permuteChoice test passed"
# Check that with some results the choices are chosen with the lower
# errors being chosen more often.
choices = ['1', '11', '21', '31']
pc = PermuteChoices(choices)
resultsPerChoice = []
counts = dict()
for choice in choices:
resultsPerChoice.append((choice, [float(choice)]))
counts[choice] = 0
pc.setResultsPerChoice(resultsPerChoice)
rng = random.Random()
rng.seed(42)
# Check the without results the choices are chosen uniformly.
for _ in range(1000):
choice = pc.newPosition(None, rng)
counts[choice] += 1
# Make sure that as the error goes up, the number of times the choice is
# seen goes down.
prevCount = 1001
for choice in choices:
assert prevCount > counts[choice]
prevCount = counts[choice]
print "Results permuteChoice test passed"
# Check that with fixEarly as you see more data points you begin heavily
# biasing the probabilities to the one with the lowest error.
choices = ['1', '11', '21', '31']
pc = PermuteChoices(choices, fixEarly=True)
resultsPerChoiceDict = dict()
counts = dict()
for choice in choices:
resultsPerChoiceDict[choice] = (choice, [])
counts[choice] = 0
# The count of the highest probability entry, this should go up as more
# results are seen.
prevLowestErrorCount = 0
for _ in range(10):
for choice in choices:
resultsPerChoiceDict[choice][1].append(float(choice))
counts[choice] = 0
pc.setResultsPerChoice(resultsPerChoiceDict.values())
rng = random.Random()
rng.seed(42)
# Check the without results the choices are chosen uniformly.
for _ in range(1000):
choice = pc.newPosition(None, rng)
counts[choice] += 1
# Make sure that as the error goes up, the number of times the choice is
# seen goes down.
assert prevLowestErrorCount < counts['1']
prevLowestErrorCount = counts['1']
print "Fix early permuteChoice test passed"
def run(self):
"""Run unit tests on this module."""
# Set the verbosity level.
self.verbosity = 0
# ------------------------------------------------------------------------
# Test that step size is handled correctly for floats
self._testValidPositions(varClass=PermuteFloat, minValue=2.1,
maxValue=5.1, stepSize=0.5)
# ------------------------------------------------------------------------
# Test that step size is handled correctly for ints
self._testValidPositions(varClass=PermuteInt, minValue=2,
maxValue=11, stepSize=3)
# ------------------------------------------------------------------------
# Test that step size is handled correctly for ints
self._testValidPositions(varClass=PermuteInt, minValue=2,
maxValue=11, stepSize=1)
# ------------------------------------------------------------------------
# Test that we can converge on a target value
# Using Float
self._testConvergence(varClass=PermuteFloat, minValue=2.1,
maxValue=5.1, targetValue=5.0)
self._testConvergence(varClass=PermuteFloat, minValue=2.1,
maxValue=5.1, targetValue=2.2)
self._testConvergence(varClass=PermuteFloat, minValue=2.1,
maxValue=5.1, targetValue=3.5)
# Using int
self._testConvergence(varClass=PermuteInt, minValue=1,
maxValue=20, targetValue=19)
self._testConvergence(varClass=PermuteInt, minValue=1,
maxValue=20, targetValue=1)
# Test permute choices
self._testChoices()
################################################################################
if __name__ == '__main__':
# Run all tests
tests = Tests()
tests.run()
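# ------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): shows how
# PermuteChoices is expected to bias sampling once per-choice results are
# supplied, mirroring _testChoices above. It assumes only the PermuteChoices
# API exercised in this module (newPosition(globalBest, rng) and
# setResultsPerChoice); it is defined for documentation and never called.
def _examplePermuteChoicesSketch():
    import random
    rng = random.Random(42)
    pc = PermuteChoices(['a', 'b'])
    # Report a much lower error for 'a' than for 'b'; subsequent samples
    # should then favor 'a' over 'b'.
    pc.setResultsPerChoice([('a', [0.1]), ('b', [10.0])])
    picks = [pc.newPosition(None, rng) for _ in range(100)]
    return picks.count('a') > picks.count('b')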
|
chetan51/nupic
|
nupic/swarming/permutationhelpers.py
|
Python
|
gpl-3.0
| 26,127
|
[
"Gaussian"
] |
7fa1e2daeefe8cb7ea617129a65510e70fddece589404e5a5e407a8d2e215317
|
"""Install proteomics tools not currently packaged.
"""
import os
import re
from fabric.api import cd
from fabric.context_managers import prefix
from shared import (_if_not_installed, _make_tmp_dir,
_get_install, _make_copy,
_java_install, _symlinked_java_version_dir,
_get_bin_dir, _get_install_subdir,
_fetch_and_unpack,
_create_python_virtualenv,
_get_bitbucket_download_url,
_write_to_file)
from cloudbio.galaxy.utils import _chown_galaxy
# Tools from the Tabb lab are only available via TeamCity builds whose
# artifacts are eventually deleted (I think), so versions are stored at
# getgalaxyp.msi.umn.edu for CloudBioLinux for safekeeping.
PROTEOMICS_APP_ARCHIVE_URL = "http://getgalaxyp.msi.umn.edu/downloads"
# TODO: Define TPP install root
@_if_not_installed("xinteract")
def install_transproteomic_pipeline(env):
"""
"""
## version should be of form X.X.X-codename
default_version = "4.6.1-occupy"
version = env.get("tool_version", default_version)
version_parts = re.match("(\d\.\d)\.(\d)-(.*)", version)
major_version = version_parts.group(1)
revision = version_parts.group(2)
codename = version_parts.group(3)
if revision == "0":
download_rev = ""
else:
download_rev = ".%s" % revision
download_version = ("%s%s" % (major_version, download_rev))
url_pieces = (major_version, codename, revision, download_version)
url = 'http://sourceforge.net/projects/sashimi/files/Trans-Proteomic Pipeline (TPP)/TPP v%s (%s) rev %s/TPP-%s.tgz' % url_pieces
def _chdir_src(work_cmd):
def do_work(env):
src_dir = "trans_proteomic_pipeline/src" if version == "4.6.1-occupy" else "src"
with cd(src_dir):
env.safe_append("Makefile.config.incl", "TPP_ROOT=%s/" % env["system_install"])
env.safe_append("Makefile.config.incl", "TPP_WEB=/tpp/")
env.safe_append("Makefile.config.incl", "XSLT_PROC=/usr/bin/xsltproc")
env.safe_append("Makefile.config.incl", "CGI_USERS_DIR=${TPP_ROOT}cgi-bin")
work_cmd(env)
return do_work
def _make(env):
env.safe_run("make")
env.safe_sudo("make install")
_get_install(url, env, _chdir_src(_make))
@_if_not_installed("omssacl")
def install_omssa(env):
default_version = "2.1.9"
version = env.get("tool_version", default_version)
url = 'ftp://ftp.ncbi.nih.gov/pub/lewisg/omssa/%s/omssa-%s.linux.tar.gz' % (version, version)
env.safe_sudo("mkdir -p '%s'" % env["system_install"])
## OMSSA really wants mods.xml, usermods.xml, etc... in the same directory
## so just copying everything there.
_get_install(url, env, _make_copy(find_cmd="ls -1", do_make=False))
@_if_not_installed("OpenMSInfo")
def install_openms(env):
"""
See comments above, working on getting this to compile from source. In
the meantime installing from deb will have to do.
"""
default_version = "1.10.0"
version = env.get("tool_version", default_version)
dot_version = version[0:version.rindex('.')]
url = 'http://downloads.sourceforge.net/project/open-ms/OpenMS/OpenMS-%s/OpenMS-%s.tar.gz' % (dot_version, version)
def _make(env):
with cd("contrib"):
env.safe_run("cmake -DINSTALL_PREFIX=%s ." % env.get('system_install'))
env.safe_run("make")
env.safe_run("cmake -DINSTALL_PREFIX=%s ." % env.get('system_install'))
env.safe_run("make")
env.safe_sudo("make install")
_get_install(url, env, _make)
@_if_not_installed("LTQ-iQuant")
def install_tint_proteomics_scripts(env):
default_version = "1.19.19"
version = env.get("tool_version", default_version)
url = "http://artifactory.msi.umn.edu/simple/ext-release-local/msi/umn/edu/tint-proteomics-scripts/%s/tint-proteomics-scripts-%s.zip" % (version, version)
def install_fn(env, install_dir):
env.safe_sudo("mv * '%s'" % install_dir)
bin_dir = _get_bin_dir(env)
for script in ["ITraqScanSummarizer", "LTQ-iQuant", "LTQ-iQuant-cli", "MgfFormatter"]:
env.safe_sudo("ln -s '%s' %s" % (os.path.join(install_dir, script), bin_dir))
env.safe_sudo("chmod +x '%s'/*" % bin_dir)
_java_install("tint-proteomics-scripts", version, url, env, install_fn)
@_if_not_installed("ms2preproc")
def install_ms2preproc(env):
default_version = "2009"
version = env.get("tool_version", default_version)
get_cmd = 'wget "http://software.steenlab.org/ms2preproc/ms2preproc.zip" -O ms2preproc.zip'
with _make_tmp_dir() as work_dir:
with cd(work_dir):
env.safe_run(get_cmd)
env.safe_run("unzip ms2preproc.zip")
with cd("ms2preproc"):
env.safe_run("mv ms2preproc-r2821-x86_64 ms2preproc-x86_64")
env.safe_run("chmod +x ms2preproc-x86_64")
install_dir = _get_bin_dir(env)
env.safe_sudo("mv ms2preproc-x86_64 '%s'/ms2preproc" % install_dir)
@_if_not_installed("MZmine")
def install_mzmine(env):
default_version = "2.10"
version = env.get("tool_version", default_version)
url = "http://downloads.sourceforge.net/project/mzmine/mzmine2/%s/MZmine-%s.zip" % (version, version)
def install_fn(env, install_dir):
## Enhanced MZmine startup script that works when used as a symbolic link and is tailored for CloudBioLinux.
_get_gist_script(env, "https://gist.github.com/jmchilton/5474421/raw/15f3b817fa82d5f5e2143ee08bd248efee951d6a/MZmine")
# Hack for multi-user environment.
env.safe_sudo("chmod -R o+w conf")
env.safe_sudo("mv * '%s'" % install_dir)
bin_dir = os.path.join(env.get("system_install"), "bin")
env.safe_sudo("mkdir -p '%s'" % bin_dir)
env.safe_sudo("ln -s '%s' %s" % (os.path.join(install_dir, "MZmine"), os.path.join(bin_dir, "MZmine")))
_java_install("mzmine2", version, url, env, install_fn)
@_if_not_installed("SearchGUI")
def install_searchgui(env):
default_version = "1.13.1"
version = env.get("tool_version", default_version)
url = "http://searchgui.googlecode.com/files/SearchGUI-%s_mac_and_linux.zip" % version
def install_fn(env, install_dir):
dir_name = "SearchGUI-%s_mac_and_linux" % version
env.safe_sudo("tar -xf %s.tar" % dir_name)
with cd(dir_name):
_get_gist_script(env, "https://gist.github.com/jmchilton/5002161/raw/dc9fa36dd0e6eddcdf43cd2b659e4ecee5ad29df/SearchGUI")
_get_gist_script(env, "https://gist.github.com/jmchilton/5002161/raw/b97fb4d9fe9927de1cfc5433dd1702252e9c0348/SearchCLI")
# Fix known bug with SearchGUI version 1.12.2
env.safe_sudo("find -iname \"*.exe\" -exec rename s/.exe// {} \;")
# Hack for multi-user environment.
env.safe_sudo("chmod -R o+w resources")
env.safe_sudo("mv * '%s'" % install_dir)
bin_dir = os.path.join(env.get("system_install"), "bin")
env.safe_sudo("mkdir -p '%s'" % bin_dir)
env.safe_sudo("ln -s '%s' %s" % (os.path.join(install_dir, "SearchGUI"), os.path.join(bin_dir, "SearchGUI")))
env.safe_sudo("ln -s '%s' %s" % (os.path.join(install_dir, "SearchCLI"), os.path.join(bin_dir, "SearchCLI")))
_unzip_install("SearchGUI", version, url, env, install_fn)
@_if_not_installed("psm_eval")
def install_psm_eval(env):
default_version = "0.1.0"
version = env.get("tool_version", default_version)
url = "git clone https://github.com/jmchilton/psm-eval.git"
def install_fn(env, install_dir):
env.safe_sudo("cp -r psm-eval/* '%s'" % install_dir)
_create_python_virtualenv(env, "psme", "%s/requirements.txt" % install_dir)
bin_dir = os.path.join(env.get("system_install"), "bin")
env.safe_sudo("mkdir -p '%s'" % bin_dir)
env.safe_sudo("ln -s '%s' %s" % (os.path.join(install_dir, "psm_eval"), os.path.join(bin_dir, "psm_eval")))
_unzip_install("psm_eval", version, url, env, install_fn)
@_if_not_installed("PeptideShaker")
def install_peptide_shaker(env):
default_version = "0.20.1"
version = env.get("tool_version", default_version)
url = "http://peptide-shaker.googlecode.com/files/PeptideShaker-%s.zip" % version
def install_fn(env, install_dir):
_get_gist_script(env, "https://gist.github.com/jmchilton/5002161/raw/f1fe76d6e6eed99a768ed0b9f41c2d0a6a4b24b7/PeptideShaker")
_get_gist_script(env, "https://gist.github.com/jmchilton/5002161/raw/8a17d5fb589984365284e55a98a455c2b47da54f/PeptideShakerCLI")
# Hack for multi-user environment.
env.safe_sudo("chmod -R o+w resources")
env.safe_sudo("mv * '%s'" % install_dir)
bin_dir = os.path.join(env.get("system_install"), "bin")
env.safe_sudo("mkdir -p '%s'" % bin_dir)
env.safe_sudo("ln -s '%s' %s" % (os.path.join(install_dir, "PeptideShaker"), os.path.join(bin_dir, "PeptideShaker")))
env.safe_sudo("ln -s '%s' %s" % (os.path.join(install_dir, "PeptideShakerCLI"), os.path.join(bin_dir, "PeptideShakerCLI")))
_java_install("PeptideShaker", version, url, env, install_fn)
def _get_gist_script(env, url):
name = url.split("/")[-1]
env.safe_sudo("wget '%s'" % url)
env.safe_sudo("chmod +x '%s'" % name)
@_if_not_installed("Mayu")
def install_mayu(env):
default_version = "1.06"
version = env.get("tool_version", default_version)
url = "http://proteomics.ethz.ch/muellelu/web/LukasReiter/Mayu/package/Mayu.zip"
def install_fn(env, install_dir):
share_dir = _get_install_subdir(env, "share")
env.safe_sudo("mv Mayu '%s'" % share_dir)
bin_dir = _get_bin_dir(env)
executable = "%s/Mayu" % bin_dir
env.safe_sudo("""echo '#!/bin/bash\ncd %s/Mayu; perl Mayu.pl \"$@\"' > %s """ % (share_dir, executable))
env.safe_sudo("chmod +x '%s'" % executable)
_unzip_install("mayu", version, url, env, install_fn)
def install_pride_inspector(env):
default_version = "1.3.0"
version = env.get("tool_version", default_version)
url = "http://pride-toolsuite.googlecode.com/files/pride-inspector-%s.zip" % version
def install_fn(env, install_dir):
_get_gist_script(env, "https://gist.github.com/jmchilton/5474788/raw/6bcffd8680ec0e0301af44961184529a1f76dd3b/pride-inspector")
# Hack for multi-user environment.
env.safe_sudo("chmod -R o+w log config")
env.safe_sudo("mv * '%s'" % install_dir)
bin_dir = os.path.join(env.get("system_install"), "bin")
env.safe_sudo("mkdir -p '%s'" % bin_dir)
env.safe_sudo("ln -s '%s' %s" % (os.path.join(install_dir, "pride-inspector"), os.path.join(bin_dir, "pride-inspector")))
_unzip_install("pride_inspector", version, url, env, install_fn, "PRIDE_Inspector")
def install_pride_converter2(env):
default_version = "2.0.17"
version = env.get("tool_version", default_version)
url = "http://pride-converter-2.googlecode.com/files/pride-converter-%s-bin.zip" % version
def install_fn(env, install_dir):
_get_gist_script(env, "https://gist.github.com/jmchilton/5475119/raw/4e9135ada5114ba149f3ebc8965aee242bfc776f/pride-converter")
# Hack for multi-user environment.
env.safe_sudo("mkdir log; chmod o+w log")
env.safe_sudo("mv * '%s'" % install_dir)
bin_dir = os.path.join(env.get("system_install"), "bin")
env.safe_sudo("mkdir -p '%s'" % bin_dir)
env.safe_sudo("ln -s '%s' %s" % (os.path.join(install_dir, "pride-converter"), os.path.join(bin_dir, "pride-converter")))
_unzip_install("pride_converter2", version, url, env, install_fn, ".")
def _unzip_install(pname, version, url, env, install_fn, dir_name="."):
install_dir = _symlinked_java_version_dir(pname, version, env)
if install_dir:
with _make_tmp_dir() as work_dir:
with cd(work_dir):
_fetch_and_unpack(url, need_dir=False)
with cd(dir_name):
install_fn(env, install_dir)
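# Illustrative sketch (hedged) of the install_fn callback convention used by
# _unzip_install/_java_install throughout this module: the helper unpacks the
# archive into a temporary directory and then calls install_fn(env, install_dir)
# with the unpacked directory as the working directory. "exampletool" and its
# URL below are hypothetical placeholders, not a real package; the function is
# never called by this module.
def _example_unzip_install_usage(env):
    url = "http://example.org/downloads/exampletool-1.0.zip"
    def install_fn(env, install_dir):
        # Runs inside the unpacked archive; move its contents into place.
        env.safe_sudo("mv * '%s'" % install_dir)
    _unzip_install("exampletool", "1.0", url, env, install_fn)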
@_if_not_installed("SuperHirnv03")
def install_superhirn(env):
default_version = "0.03"
version = env.get("tool_version", default_version)
url = "https://github.com/jmchilton/SuperHirn/zipball/%s/SuperHirn.zip" % version
def _chdir(work_cmd):
def do_work(env):
with cd("SuperHirnv03/make"):
work_cmd(env)
return do_work
_get_install(url, env, _chdir(_make_copy(find_cmd="find -perm -100 -name 'SuperHirn*'")))
@_if_not_installed("percolator")
def install_percolator(env):
default_version = "2_04"
version = env.get("tool_version", default_version)
url = "https://github.com/downloads/percolator/percolator/percolator_%s_full_src.tar.gz" % version
def make(env):
with cd(".."):
env.safe_run("env")
env.safe_run("cmake -DCMAKE_INSTALL_PREFIX='%s' . " % env.system_install)
env.safe_run("make -j8")
env.safe_sudo("make install")
_get_install(url, env, make)
@_if_not_installed("PepNovo")
def install_pepnovo(env):
default_version = "20120423"
version = env.get("tool_version", default_version)
url = "http://proteomics.ucsd.edu/Downloads/PepNovo.%s.zip" % version
def install_fn(env, install_dir):
with cd("src"):
env.safe_run("make")
env.safe_sudo("mkdir -p '%s/bin'" % env.system_install)
env.safe_sudo("mkdir -p '%s/share/pepnovo'" % env.system_install)
env.safe_sudo("mv PepNovo_bin '%s/bin/PepNovo'" % env.system_install)
env.safe_sudo("cp -r '../Models' '%s/share/pepnovo'" % env.system_install)
_unzip_install("pepnovo", version, url, env, install_fn)
@_if_not_installed("crux")
def install_crux(env):
default_version = "1.39"
version = env.get("tool_version", default_version)
url = "http://noble.gs.washington.edu/proj/crux/download/crux_%s-x86_64-Linux.zip" % version
def _move(env):
bin_dir = _get_bin_dir(env)
env.safe_sudo("mv bin/* '%s'" % (bin_dir))
_get_install(url, env, _move)
@_if_not_installed("Fido")
def install_fido(env):
version = "2011"
url = 'http://noble.gs.washington.edu/proj/fido/fido.tar.gz'
# Adapted from Jorrit Boekel's mi-deployment fork
# https://bitbucket.org/glormph/mi-deployment-protoeimcs
def _chdir_src(work_cmd):
def do_work(env):
with cd("src/cpp"):
env.safe_append('tmpmake', 'SHELL=/bin/bash')
env.safe_append('tmpmake', 'prefix=%s' % env.get("system_install"))
env.safe_append('tmpmake', 'CPPFLAGS=-Wall -ffast-math -march=x86-64 -pipe -O4 -g')
env.safe_run('cat makefile |grep BINPATH -A 9999 >> tmpmake')
env.safe_run('cp tmpmake makefile')
work_cmd(env)
return do_work
_get_install(url, env, _chdir_src(_make_copy(find_cmd="find ../../bin -perm -100 -name 'Fido*'")))
def install_ipig(env):
""" This tool is installed in Galaxy's jars dir """
# This galaxy specific download probably doesn't belong in this file.
default_version = "r5"
version = env.get("tool_version", default_version)
url = 'http://downloads.sourceforge.net/project/ipig/ipig_%s.zip' % version
pkg_name = 'ipig'
install_dir = os.path.join(env.galaxy_jars_dir, pkg_name)
install_cmd = env.safe_sudo if env.use_sudo else env.safe_run
install_cmd("mkdir -p %s" % install_dir)
with cd(install_dir):
install_cmd("wget %s -O %s" % (url, os.path.split(url)[-1]))
install_cmd("unzip -u %s" % (os.path.split(url)[-1]))
install_cmd("rm %s" % (os.path.split(url)[-1]))
install_cmd('chown --recursive %s:%s %s' % (env.galaxy_user, env.galaxy_user, install_dir))
def install_peptide_to_gff(env):
default_version = "master"
version = env.get("tool_version", default_version)
repository = "hg clone https://jmchilton@bitbucket.org/galaxyp/peptide_to_gff"
def install_fn(env, install_dir):
env.safe_sudo("cp -r peptide_to_gff/* '%s'" % install_dir)
_create_python_virtualenv(env, "peptide_to_gff", "%s/requirements.txt" % install_dir)
bin_dir = os.path.join(env.get("system_install"), "bin")
env.safe_sudo("mkdir -p '%s'" % bin_dir)
env.safe_sudo("ln -s '%s' '%s'" % (os.path.join(install_dir, "peptide_to_gff"), os.path.join(bin_dir, "peptide_to_gff")))
_unzip_install("peptide_to_gff", version, repository, env, install_fn)
def install_galaxy_protk(env):
"""This method installs Ira Cooke's ProtK framework. Very galaxy specific,
can only be installed in context of custom Galaxy tool.
By default this will install ProtK from rubygems server, but if
env.protk_version is set to <version>@<url> (e.g.
1.1.5@https://bitbucket.org/iracooke/protk-working) the
gem will be cloned with hg and installed from source.
"""
if not env.get('galaxy_tool_install', False):
from cloudbio.custom.galaxy import _prep_galaxy
_prep_galaxy(env)
default_version = "1.2.2"
version = env.get("tool_version", default_version)
version_and_revision = version
install_from_source = version_and_revision.find("@") > 0
# e.g. protk_version = 1.1.5@https://bitbucket.org/iracooke/protk-working
if install_from_source:
(version, revision) = version_and_revision.split("@")
url = _get_bitbucket_download_url(revision, "https://bitbucket.org/iracooke/protk")
else:
version = version_and_revision
ruby_version = "1.9.3"
force_rvm_install = False
with prefix("HOME=~%s" % env.galaxy_user):
def rvm_exec(env, cmd="", rvm_cmd="use", with_gemset=False):
target = ruby_version if not with_gemset else "%s@%s" % (ruby_version, "protk-%s" % version)
prefix = ". $HOME/.rvm/scripts/rvm; rvm %s %s; " % (rvm_cmd, target)
env.safe_sudo("%s %s" % (prefix, cmd), user=env.galaxy_user)
if not env.safe_exists("$HOME/.rvm") or force_rvm_install:
env.safe_sudo("curl -L get.rvm.io | bash -s stable; source ~%s/.rvm/scripts/rvm" % (env.galaxy_user), user=env.galaxy_user)
rvm_exec(env, rvm_cmd="install")
rvm_exec(env, cmd="rvm gemset create protk-%s" % version)
if not install_from_source:
# Typical rubygem install
rvm_exec(env, "gem install --no-ri --no-rdoc protk -v %s" % version, with_gemset=True)
else:
with cd("~%s" % env.galaxy_user):
env.safe_sudo("rm -rf protk_source; hg clone '%s' protk_source" % url, user=env.galaxy_user)
rvm_exec(env, "cd protk_source; gem build protk.gemspec; gem install protk", with_gemset=True)
protk_properties = {}
## ProtK can set these up itself, should make that an option.
protk_properties["tpp_root"] = os.path.join(env.galaxy_tools_dir, "transproteomic_pipeline", "default")
protk_properties['openms_root'] = "/usr" # os.path.join(env.galaxy_tools_dir, "openms", "default", "bin")
### Assumes omssa, blast, and transproteomic_pipeline CBL galaxy installs.
protk_properties['omssa_root'] = os.path.join(env.galaxy_tools_dir, "omssa", "default", "bin")
protk_properties['blast_root'] = os.path.join(env.galaxy_tools_dir, "blast+", "default")
protk_properties['pwiz_root'] = os.path.join(env.galaxy_tools_dir, "transproteomic_pipeline", "default", "bin")
# Other properties: log_file, blast_root
env.safe_sudo("mkdir -p \"$HOME/.protk\"", user=env.galaxy_user)
env.safe_sudo("mkdir -p \"$HOME/.protk/Databases\"", user=env.galaxy_user)
import yaml
_write_to_file(yaml.dump(protk_properties), "/home/%s/.protk/config.yml" % env.galaxy_user, "0755")
rvm_exec(env, "protk_setup.rb galaxyenv", with_gemset=True)
install_dir = os.path.join(env.galaxy_tools_dir, "galaxy_protk", version)
env.safe_sudo("mkdir -p '%s'" % install_dir)
_chown_galaxy(env, install_dir)
env.safe_sudo('ln -s -f "$HOME/.protk/galaxy/env.sh" "%s/env.sh"' % install_dir, user=env.galaxy_user)
with cd(install_dir):
with cd(".."):
env.safe_sudo("ln -s -f '%s' default" % version)
@_if_not_installed("myrimatch")
def install_myrimatch(env):
default_version = "2.1.131"
_install_tabb_tool(env, default_version, "myrimatch-bin-linux-x86_64-gcc41-release", ["myrimatch"])
@_if_not_installed("pepitome")
def install_pepitome(env):
default_version = "1.0.45"
_install_tabb_tool(env, default_version, "pepitome-bin-linux-x86_64-gcc41-release", ["pepitome"])
@_if_not_installed("directag")
def install_directag(env):
default_version = "1.3.62"
_install_tabb_tool(env, default_version, "directag-bin-linux-x86_64-gcc41-release", ["adjustScanRankerScoreByGroup", "directag"])
@_if_not_installed("tagrecon")
def install_tagrecon(env):
default_version = "1.4.63"
# TODO: Should consider a better way to handle the unimod xml and blosum matrix.
_install_tabb_tool(env, default_version, "tagrecon-bin-linux-x86_64-gcc41-release", ["tagrecon", "unimod.xml", "blosum62.fas"])
@_if_not_installed("idpQonvert")
def install_idpqonvert(env):
default_version = "3.0.475"
version = env.get("tool_version", default_version)
url = "%s/idpQonvert_%s" % (PROTEOMICS_APP_ARCHIVE_URL, version)
env.safe_run("wget --no-check-certificate -O %s '%s'" % ("idpQonvert", url))
env.safe_run("chmod 755 idpQonvert")
env.safe_sudo("mkdir -p '%s/bin'" % env["system_install"])
env.safe_sudo("mv %s '%s/bin'" % ("idpQonvert", env["system_install"]))
env.safe_sudo("chmod +x '%s/bin/idpQonvert'" % env["system_install"])
def _install_tabb_tool(env, default_version, download_name, exec_names):
version = env.get("tool_version", default_version)
url = "%s/%s-%s.tar.bz2" \
% (PROTEOMICS_APP_ARCHIVE_URL, download_name, version.replace(".", "_"))
_fetch_and_unpack(url, False)
env.safe_sudo("mkdir -p '%s/bin'" % env["system_install"])
for exec_name in exec_names:
env.safe_sudo("mv %s '%s/bin'" % (exec_name, env["system_install"]))
|
heuermh/cloudbiolinux
|
cloudbio/custom/bio_proteomics.py
|
Python
|
mit
| 22,414
|
[
"BLAST",
"Galaxy",
"OpenMS"
] |
73905bb5b9eada023546e8ac44a128e571c76dbb23d83c96472ed9894ec7bbcb
|
#!/usr/bin/env python
"""
Kyle McChesney
Ruffus pipeline for simple bowtie alignment
"""
from ruffus import *
from big_wig_extras import BigWigExtras
import ruffus.cmdline as cmdline
import subprocess
import logging
import os
import pprint
import re
import time
parser = cmdline.get_argparse(description='Given a directory of sorted bam files, convert them to adjusted bigWigs')
# Program arguments -- Most go straight to bowtie
parser.add_argument("--dir", help="Fullpath to the directory where the BAMS are located", required=True)
parser.add_argument("--size", help="Fullpath to size file")
#parser.add_argument("--reads", help="Fullpath to read stats file", required=True)
parser.add_argument("--output", help="Fullpath to output dir", default="./")
# parse the args
options = parser.parse_args()
# Kenny loggins
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log_formatter = logging.Formatter('%(asctime)s {%(levelname)s}: %(message)s')
# file log
time_stamp = str(time.time()).replace(".","")
log_file = options.log_file if options.log_file else os.path.join(options.output,"{}.{}.{}".format("bigWig_pipeline",time_stamp,"log"))
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(log_formatter)
# console log
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(log_formatter)
# set it all up
log.addHandler(file_handler)
log.addHandler(stream_handler)
log.info("Starting BigWig Run")
extras = BigWigExtras(log)
# pre-run checks
input_files = extras.make_bam_list(options.dir)
#extras.make_mill_reads(options.reads)
@transform(input_files, suffix(".sorted.bam"), ".bed", options.output)
def bam_to_bed(input_file, output_file, output):
log.info("Converting %s to a bed file", input_file)
if subprocess.call("bamToBed -i {} > {}".format(input_file, output_file), shell=True):
log.warn("bam to bed conversion of %s failed, exiting", input_file)
raise SystemExit
# now we can move sorted bam to output
file_name = os.path.basename(input_file)
new_name = os.path.join(output, file_name)
os.rename(input_file, new_name)
@transform(bam_to_bed, suffix(".bed"), ".bg", options.size, extras)
def bed_to_bg(input_file, output_file, size_file, extras):
log.info("Converting %s to a genome coverage file", input_file)
#base = os.path.splitext(os.path.basename(input_file))[0]
#mill_reads = extras.get_mill_reads(base)
#scale = 1 / mill_reads
command = "genomeCoverageBed -bg -split -i {} -g {} > {}".format(input_file, size_file, output_file)
if subprocess.call(command, shell=True):
log.warn("bed to coverage conversion of %s failed, exiting", input_file)
extras.report_error("bed_to_bg","bed to bg conversion of {} failed".format(input_file))
raise SystemExit
log.info("Deleting old file %s", input_file)
os.unlink(input_file)
@transform(bed_to_bg, formatter(), options.output+"{basename[0]}.bw", options.size, options.output)
def bg_to_bw(input_file, output_file, size_file, output):
log.info("Creating bigwig file from bg: %s", input_file)
command = "bedGraphToBigWig {} {} {}".format( input_file, size_file, output_file)
if subprocess.call(command, shell=True):
log.warn("bg to bw conversion of %s failed, exiting", input_file)
extras.report_error("bg_to_bw","bg to bw conversion of {} failed".format(input_file))
raise SystemExit
log.info("Deleting old file %s", input_file)
os.unlink(input_file)
# call out to external bwtools here
@merge(bg_to_bw, os.path.join(options.output,"bigWigStats-"+time_stamp+".out"))
def bw_stats(input_files, output_file):
# we are going to call bwtool summary and bwtool distribution;
# bwtool wants an output file argument, so we pass /dev/stdout explicitly
# to capture its output -- what a program
summary = "bwtool summary 10000 -header -with-sum {} /dev/stdout"
dist = "bwtool distribution {} /dev/stdout"
for input_file in input_files:
log.info("Running bigwig stats on {}".format(input_file))
with open(output_file, "a+") as stats:
for command in [summary, dist]:
try:
output = subprocess.check_output(command.format(os.path.abspath(input_file)).split())
if command.startswith("bwtool summary"):
stats.write("#### bwtool summary for {}\n".format(input_file))
stats.write(output)
stats.write("####\n")
# filter zeros out
else:
output = output.rstrip()
output_clean = [line for line in output.split("\n") if line.split('\t')[1] != '0']
stats.write("#### bwtool distribution for {}\n".format(input_file))
stats.write("depth\tcount\n")
stats.write("\n".join(output_clean))
stats.write("\n####\n")
except subprocess.CalledProcessError:
log.warn("{} failed running on {}".format(command, input_file))
raise SystemExit
stats.write("\n\n")
# run the pipeline
cmdline.run(options)
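# Illustrative sketch (hedged): the suffix()-based @transform decorators above
# derive each task's output name from its input name. The helper below mirrors
# the bam_to_bed mapping purely for documentation; it is not registered with
# Ruffus and is never called by the pipeline.
def _example_suffix_mapping(input_file):
    # e.g. "sample1.sorted.bam" -> "sample1.bed"
    assert input_file.endswith(".sorted.bam")
    return input_file[:-len(".sorted.bam")] + ".bed"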
|
mbiokyle29/pipelines
|
wig/bigwig_pipeline.py
|
Python
|
mit
| 5,414
|
[
"Bowtie"
] |
a2fb39bae67991d63adc7ee47567f3fdfb1e9bcef85ae1e71f758361c90ffc7f
|
""" The SGE TimeLeft utility interrogates the SGE batch system for the
current CPU consumed, as well as its limit.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import re
import time
import socket
from DIRAC import S_OK, S_ERROR
from DIRAC.Resources.Computing.BatchSystems.TimeLeft.TimeLeft import runCommand
from DIRAC.Resources.Computing.BatchSystems.TimeLeft.ResourceUsage import ResourceUsage
class SGEResourceUsage(ResourceUsage):
"""
This is the SGE plugin of the TimeLeft Utility
"""
def __init__(self):
"""Standard constructor"""
super(SGEResourceUsage, self).__init__("SGE", "JOB_ID")
self.queue = os.environ.get("QUEUE")
sgePath = os.environ.get("SGE_BINARY_PATH")
if sgePath:
os.environ["PATH"] += ":" + sgePath
self.log.verbose("JOB_ID=%s, QUEUE=%s" % (self.jobID, self.queue))
self.startTime = time.time()
def getResourceUsage(self):
"""Returns S_OK with a dictionary containing the entries CPU, CPULimit,
WallClock, WallClockLimit, and Unit for current slot.
"""
cmd = "qstat -f -j %s" % (self.jobID)
result = runCommand(cmd)
if not result["OK"]:
return result
cpu = None
cpuLimit = None
wallClock = None
wallClockLimit = None
lines = str(result["Value"]).split("\n")
for line in lines:
if re.search("usage.*cpu.*", line):
match = re.search(r"cpu=([\d,:]*),", line)
if match:
cpuList = match.groups()[0].split(":")
try:
newcpu = 0.0
if len(cpuList) == 3:
newcpu = float(cpuList[0]) * 3600 + float(cpuList[1]) * 60 + float(cpuList[2])
elif len(cpuList) == 4:
newcpu = (
float(cpuList[0]) * 24 * 3600
+ float(cpuList[1]) * 3600
+ float(cpuList[2]) * 60
+ float(cpuList[3])
)
if not cpu or newcpu > cpu:
cpu = newcpu
except ValueError:
self.log.warn('Problem parsing "%s" for CPU consumed' % line)
if re.search("hard resource_list.*cpu.*", line):
match = re.search(r"_cpu=(\d*)", line)
if match:
cpuLimit = float(match.groups()[0])
match = re.search(r"_rt=(\d*)", line)
if match:
wallClockLimit = float(match.groups()[0])
else:
self.log.warn("No hard limits found")
# Some SGE batch systems apply CPU scaling factor to the CPU consumption figures
if cpu:
factor = _getCPUScalingFactor()
if factor:
cpu = cpu / factor
consumed = {"CPU": cpu, "CPULimit": cpuLimit, "WallClock": wallClock, "WallClockLimit": wallClockLimit}
if None in consumed.values():
missed = [key for key, val in consumed.items() if val is None]
msg = "Could not determine parameter"
self.log.warn("Could not determine parameter", ",".join(missed))
self.log.debug("This is the stdout from the batch system call\n%s" % (result["Value"]))
else:
self.log.debug("TimeLeft counters complete:", str(consumed))
if cpuLimit or wallClockLimit:
# We have got a partial result from SGE
if not cpuLimit:
# Take some margin
consumed["CPULimit"] = wallClockLimit * 0.8
if not wallClockLimit:
consumed["WallClockLimit"] = cpuLimit / 0.8
if not cpu:
consumed["CPU"] = time.time() - self.startTime
if not wallClock:
consumed["WallClock"] = time.time() - self.startTime
self.log.debug("TimeLeft counters restored:", str(consumed))
return S_OK(consumed)
else:
msg = "Could not determine necessary parameters"
self.log.info(msg, ":\nThis is the stdout from the batch system call\n%s" % (result["Value"]))
retVal = S_ERROR(msg)
retVal["Value"] = consumed
return retVal
def _getCPUScalingFactor():
host = socket.getfqdn()
cmd = "qconf -se %s" % host
result = runCommand(cmd)
if not result["OK"]:
return None
lines = str(result["Value"]).split("\n")
for line in lines:
if re.search("usage_scaling", line):
match = re.search(r"cpu=([\d,\.]*),", line)
if match:
return float(match.groups()[0])
return None
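# Illustrative sketch (hedged), mirroring the parsing in getResourceUsage above:
# qstat reports CPU usage as "HH:MM:SS" or "D:HH:MM:SS"; this converts it to
# seconds. It is not used by the plugin itself.
# e.g. _exampleParseCpuUsage("1:02:03") == 3723.0
def _exampleParseCpuUsage(cpuField):
    parts = [float(part) for part in cpuField.split(":")]
    if len(parts) == 3:
        return parts[0] * 3600 + parts[1] * 60 + parts[2]
    if len(parts) == 4:
        return parts[0] * 24 * 3600 + parts[1] * 3600 + parts[2] * 60 + parts[3]
    return None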
|
ic-hep/DIRAC
|
src/DIRAC/Resources/Computing/BatchSystems/TimeLeft/SGEResourceUsage.py
|
Python
|
gpl-3.0
| 4,887
|
[
"DIRAC"
] |
65a4a69764303954e797182b57e6b6a8943a8f58db9325bf2840116998cf8e4d
|
#
# File:
# TRANS_read_ASCII.py
#
# Synopsis:
# Illustrates how to read an ASCII file
#
# Categories:
# I/O
#
# Author:
# Karin Meier-Fleischer, based on NCL example
#
# Date of initial publication:
# September 2018
#
# Description:
# This example shows how to read an ASCII file.
#
# Effects illustrated:
# o Read ASCII data
#
# Output:
# -
#
# Notes: The data for this example can be downloaded from
# http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/
#
"""
Transition Guide Python Example: TRANS_read_ASCII.py
- read ASCII (CSV) file
- retrieve variable information
Input file: Test_6h.csv
2.00;3.50;5.10;8.20
2.40;3.10;4.80;8.90
2.60;3.70;5.30;10.10
2.75;3.90;5.55;10.25
3.00;4.10;6.05;10.50
2018-08-28 kmf
"""
from __future__ import print_function
import numpy as np
import Ngl
#-- data file name
diri = "/Users/k204045/local/miniconda2/envs/pyn_env/lib/ncarg/data/nug/"
fili = "Test_6h.csv"
#-- number of lines and columns in input file
nrows = 5
ncols = 4
#-- read all data
vals = Ngl.asciiread(diri+fili,(nrows,ncols),"float",sep=';')
#-- print information
print("vals: " + str(vals))
print("")
print("--> rank of vals: " + str(len(vals.shape)))
print("--> shape vals: " + str(vals.shape))
exit()
|
KMFleischer/PyEarthScience
|
Transition_examples_NCL_to_PyNGL/read_data/TRANS_read_ASCII.py
|
Python
|
mit
| 1,300
|
[
"NetCDF"
] |
d21e6e691936d46538e5d8b363cb16fd8f3a4753de24134e8cc5e7fbbb3b0230
|
# A Python program to run speed and evaluate the performance of MPIR
# routines.
#
# Copyright (c) 2009, Brian Gladman, Worcester, UK.
#
# This file is part of the MPIR Library. The MPIR Library is free
# software; you can redistribute it and/or modify it under the terms
# of the GNU Lesser General Public License version 2.1 as published
# by the Free Software Foundation.
#
# The MPIR Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along
# with the MPIR Library; see the file COPYING.LIB. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
from __future__ import print_function
import sys
import os
import shutil
import string
import copy
import code
import math
import platform
from subprocess import Popen, PIPE, STDOUT
if sys.platform.startswith('win'):
dir = '.\\x64\\release\\'
else :
dir = './'
ll = [
'-c -s 10(10)1000 noop',
'-c -s 10(10)1000 noop_wxs',
'-c -s 10(10)1000 noop_wxys',
'-c -s 10(10)1000 mpn_add_n',
'-c -s 10(10)1000 mpn_sub_n',
'-c -s 10(10)1000 mpn_addadd_n',
'-c -s 10(10)1000 mpn_subadd_n',
'-c -s 10(10)1000 mpn_addsub_n',
'-c -s 10(10)1000 mpn_karaadd',
'-c -s 10(10)1000 mpn_karasub',
'-c -s 10(10)1000 mpn_addmul_1.3333',
'-c -s 10(10)1000 mpn_submul_1.3333',
'-c -s 10(10)1000 mpn_submul_2',
'-c -s 10(10)1000 mpn_mul_1.3333',
'-c -s 10(10)1000 mpn_mul_1_inplace.3333',
'-c -s 10(10)1000 mpn_mul_2',
'-c -s 10(10)1000 mpn_divrem_euclidean_qr_1.3333',
'-c -s 10(10)1000 mpn_divrem_euclidean_qr_2',
'-c -s 10(10)1000 mpn_divrem_euclidean_r_1.3333',
'-c -s 10(10)1000 mpn_divrem_hensel_qr_1.3333',
'-c -s 10(10)1000 mpn_divrem_hensel_qr_1_1.3333',
'-c -s 10(10)1000 mpn_divrem_hensel_qr_1_2.3333',
'-c -s 10(10)1000 mpn_divrem_hensel_r_1.3333',
'-c -s 10(10)1000 mpn_rsh_divrem_hensel_qr_1.3333',
'-c -s 10(10)1000 mpn_rsh_divrem_hensel_qr_1_1.3333',
'-c -s 10(10)1000 mpn_rsh_divrem_hensel_qr_1_2.3333',
'-c -s 10(10)1000 mpn_divrem_hensel_rsh_qr_1.3333',
'-c -s 10(10)1000 mpn_divrem_1.3333',
'-c -s 10(10)1000 mpn_divrem_1f.3333',
'-c -s 10(10)1000 mpn_mod_1.3333',
'-c -s 10(10)1000 mpn_mod_1_1',
'-c -s 10(10)1000 mpn_mod_1_2',
'-c -s 10(10)1000 mpn_mod_1_3',
'-c -s 10(10)1000 mpn_mod_1_k.3',
'-c -s 10(10)1000 mpn_preinv_divrem_1.3333',
'-c -s 10(10)1000 mpn_preinv_divrem_1f.3333',
'-c -s 10(10)1000 mpn_preinv_mod_1.3333',
'-c -s 10(10)1000 mpn_add_err1_n',
'-c -s 10(10)1000 mpn_sub_err1_n',
'-c -s 10(10)1000 mpn_inv_divappr_q',
'-c -s 10(10)1000 mpn_inv_div_qr',
'-c -s 10(10)1000 mpn_dc_divappr_q',
'-c -s 10(10)1000 mpn_dc_div_qr_n',
'-c -s 10(10)1000 mpn_divrem_1_inv.3333',
'-c -s 10(10)1000 mpn_divrem_1f_div.3333',
'-c -s 10(10)1000 mpn_divrem_1f_inv.3333',
'-c -s 10(10)1000 mpn_mod_1_div.3333',
'-c -s 10(10)1000 mpn_mod_1_inv.3333',
'-c -s 10(10)1000 mpn_divrem_2',
'-c -s 10(10)1000 mpn_divrem_2_div',
'-c -s 10(10)1000 mpn_divrem_2_inv',
'-c -s 10(10)1000 mpn_divexact_1.3333',
'-c -s 10(10)1000 mpn_divexact_by3',
'-c -s 10(10)1000 mpn_divexact_byff',
'-c -s 10(10)1000 mpn_divexact_byfobm1.3333',
'-c -s 10(10)1000 mpn_modexact_1_odd.333',
'-c -s 10(10)1000 mpn_modexact_1c_odd.333',
'-c -s 10(10)1000 mpn_mod_34lsub1',
'-c -s 10(10)1000 mpn_dc_tdiv_qr',
'-c -s 10(10)1000 mpn_lshift.33',
'-c -s 10(10)1000 mpn_rshift.33',
'-c -s 10(10)1000 mpn_lshift1',
'-c -s 10(10)1000 mpn_rshift1',
'-c -s 10(10)1000 mpn_double',
'-c -s 10(10)1000 mpn_half',
'-c -s 10(10)1000 mpn_lshift2',
'-c -s 10(10)1000 mpn_rshift2',
'-c -s 10(10)1000 mpn_and_n',
'-c -s 10(10)1000 mpn_andn_n',
'-c -s 10(10)1000 mpn_nand_n',
'-c -s 10(10)1000 mpn_ior_n',
'-c -s 10(10)1000 mpn_iorn_n',
'-c -s 10(10)1000 mpn_nior_n',
'-c -s 10(10)1000 mpn_xor_n',
'-c -s 10(10)1000 mpn_xnor_n',
'-c -s 10(10)1000 mpn_com_n',
'-c -s 10(10)1000 mpn_not',
'-c -s 10(10)1000 mpn_popcount',
'-c -s 10(10)1000 mpn_hamdist',
'-c -s 10(10)1000 MPN_ZERO',
'-c -s 10(10)1000 MPN_COPY',
'-c -s 10(10)1000 MPN_COPY_INCR',
'-c -s 10(10)1000 MPN_COPY_DECR',
'-c -s 10(10)1000 count_leading_zeros',
'-c -s 10(10)1000 gmp_allocate_free',
'-c -s 10(10)1000 malloc_realloc_free',
'-c -s 10(10)1000 gmp_allocate_reallocate_free',
'-c -s 10(10)1000 malloc_free',
'-c -s 10(10)1000 mpn_umul_ppmm',
'-c -s 10(10)1000 mpz_add',
'-c -s 10(10)1000 mpz_init_realloc_clear',
'-c -s 10(10)1000 mpz_init_clear',
'-c -s 10(10)1000 udiv_qrnnd',
'-c -s 10(10)1000 udiv_qrnnd_c',
'-c -s 10(10)1000 udiv_qrnnd_preinv1',
'-c -s 10(10)1000 udiv_qrnnd_preinv2',
'-c -s 10(10)1000 umul_ppmm',
'-c -s 10(10)1000 mpn_popcount',
'-c -s 10(10)1000 mpn_hamdist',
]
lq = [
'-c -s 10(10)1000 mpn_dc_divrem_n',
'-c -s 10(10)1000 mpn_dc_divrem_sb',
'-c -s 10(10)1000 mpn_dc_tdiv_qr',
'-c -s 10(10)1000 mpn_kara_mul_n',
'-c -s 10(10)1000 mpn_kara_sqr_n',
'-c -s 10(10)1000 mpn_mul_basecase',
'-c -s 1000(500)10000 -t 10 mpn_mul_fft_full',
'-c -s 10(10)1000 mpn_mul_n',
'-c -s 10(10)1000 mpn_sqr_basecase',
'-c -s 10(10)1000 mpn_sqr_n',
'-c -s 50(10)1000 mpn_toom3_mul_n',
'-c -s 50(10)1000 mpn_toom3_sqr_n',
'-c -s 1(5)100 mpz_powm',
]
# run an executable and return its error return value and any output
def run_exe(exe, args, inp) :
al = {'stdin' : PIPE, 'stdout' : PIPE, 'stderr' : STDOUT }
if sys.platform.startswith('win') :
al['creationflags'] = 0x08000000
p = Popen([exe] + args.split(' '), **al)
res = p.communicate(inp.encode())[0].decode()
ret = p.poll()
return (ret, res)
# output a matrix implemented as a dictionary
def mout(m, n) :
for r in range(n) :
print('\n{0:3d}'.format(r), end='')
for c in range(n) :
print('{0:18.4f}'.format(m[(r,c)]) , end='')
print()
# output a vector
def vout(v) :
print(' ' , end='')
for c in v :
print('{0:18.4f}'.format(c) , end='')
print()
# In-place LU matrix decomposition. The diagonal
# elements of the upper triangular matrix U are
# all 1 and are not stored. Pivoting is used and
# the matrix is implemented as a dictionary. It
# is only intended for use with small matrices.
def LU_decompose(A, n) :
p = [0] * n
for k in range(n) :
# find pivot
p[k] = k
max = math.fabs(A[(k,k)])
for j in range(k + 1, n) :
if max < math.fabs(A[(j,k)]) :
max = math.fabs(A[(j,k)])
p[k] = j
# exchange rows if necessary
if p[k] != k :
for j in range(n) :
A[(k,j)], A[(p[k],j)] = A[(p[k],j)], A[(k,j)]
# exit if matrix is singular
if A[(k,k)] == 0.0 :
return None
# set upper triangular elements
for j in range(k + 1,n) :
A[(k,j)] /= A[(k,k)]
# update remaining part of original matrix
for i in range(k + 1, n) :
for j in range(k + 1, n) :
A[(i,j)] -= A[(i,k)] * A[(k,j)]
# return pivot array
return p
# Use the LU decomposition above to solve the matrix
# equation A x = b for x given A and b
def LU_solve(A, p, b) :
n = len(p)
x = [0] * n
# calculate U x = L^-1 b
for k in range(n) :
if p[k] != k :
b[k], b[p[k]] = b[p[k]], b[k]
x[k] = b[k]
for i in range(k) :
x[k] -= x[i] * A[(k,i)]
x[k] /= A[(k,k)]
# back substitute for x = U^-1 (L^-1 b)
for k in reversed(range(n)) :
if p[k] != k :
b[k], b[p[k]] = b[p[k]], b[k]
for i in range(k + 1, n) :
x[k] -= x[i] * A[(k,i)]
return x
def lsq_solve(x, y, n) :
m = {} # matrix as dictionary
v = [] # vector as list
# set up matrix and vectors for least squares
for i in range(n) :
v.append(sum(xx ** i * yy for xx, yy in zip(x, y)))
for j in range(i, n) :
m[(i,j)] = m[(j,i)] = sum(xx ** (i + j) for xx in x)
# decompose the matrix into lower and upper triangular
# matrices
p = LU_decompose(m, n)
if p != None :
return LU_solve(m, p, v)
else :
return None
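# Illustrative check (hedged): with noise-free data lsq_solve should recover
# the polynomial coefficients, e.g. y = 1 + 2*x + 3*x^2 gives coefficients
# close to [1.0, 2.0, 3.0]. Defined for documentation only; never called by
# the script.
def _example_lsq_solve() :
    x = [0.0, 1.0, 2.0, 3.0, 4.0]
    y = [1.0 + 2.0 * xx + 3.0 * xx * xx for xx in x]
    return lsq_solve(x, y, 3)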
def do_lsq(x, y, lsq_size) :
# get least squares coefficients
f = lsq_solve(x, y, lsq_size)
# now find the standard deviation from the curve
s = 0
for i in range(len(x)) :
t = sum(f[j] * x[i] ** j for j in range(lsq_size))
s += (y[i] - t) ** 2
sd = 2 * math.sqrt(s / len(x))
# now remove 'outliers' - data points outside twice
# the standard deviation
sc = 0
for i in reversed(range(len(x))) :
t = sum(f[j] * x[i] ** j for j in range(lsq_size))
if math.fabs(y[i] - t) > sd :
del x[i]
del y[i]
sc += 1
# if we had to remove more than 10% of measurements
# declare that the result is not stable
if 10 * sc > len(x) :
return None
else :
return f
print('Machine:', platform.processor())
print('Running:', platform.platform())
print('SPEED CURVE (l: no of limbs) cycles: c[0] + c[1] * l + c[2] * l^2')
print('ROUTINE ', end = '')
print(' c[0] c[1] c[2]')
lines = ''
cnt = 0
lsq_size = 4
for args in ll + lq :
cnt += 1
# run speed for each routine in the list above
ret = run_exe(os.path.join(dir, 'speed'), args, '')
# parse the output to produce limbs[] and times[]
x = []
y = []
lines = ret[1].split('\n')
for l in lines :
if len(l) :
s = l.split()
try :
t = [float(i) for i in s]
except :
continue
x += [t[0]]
y += [t[1]]
# output the name of the routine
nn = args.split(' ')[-1]
print('{0:<30s}'.format(nn) , end='')
if not len(x) :
# print(ret[1].strip(), '(failed to parse output)')
print('(failed to parse output)')
continue
q = 0 if args in ll else 1
rep = q
while rep < 3 :
rep += 1
f = do_lsq(x, y, lsq_size)
if f != None :
break
else :
print('not stable')
continue
if args in lq :
print('{0[0]:11.1f} {0[1]:11.1f} {0[2]:11.1f}'.format(f))
else :
print('{0[0]:11.1f} {0[1]:11.1f}'.format(f))
|
jpflori/mpir
|
build.vc15/run-speed.py
|
Python
|
gpl-3.0
| 10,546
|
[
"Brian"
] |
562d6546b32c681e8a9fab17bab8474e72ac7d62d3ef9b283a54487c3fddf3f5
|
# Turta IoT HAT Helper for Raspbian
# Distributed under the terms of the MIT license.
# Python Driver for Relay Controller
# Version 1.01
# Updated: July 14th, 2018
# Visit https://docs.turta.io for documentation.
import RPi.GPIO as GPIO
class RelayController:
"""Relay Controller"""
#Variables
is_initialized = False
#Pins
relay1, relay2 = 20, 12
#Initialize
def __init__(self):
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.relay1, GPIO.OUT)
GPIO.setup(self.relay2, GPIO.OUT)
GPIO.output(self.relay1, GPIO.LOW)
GPIO.output(self.relay2, GPIO.LOW)
self.is_initialized = True
return
#Relay Control
def set_relay(self, ch, st):
"""Controls the relay.
:param ch: Relay channel. 1 or 2.
:param st: Relay state. True or False."""
if (ch == 1):
GPIO.output(self.relay1, GPIO.HIGH if st else GPIO.LOW)
elif (ch == 2):
GPIO.output(self.relay2, GPIO.HIGH if st else GPIO.LOW)
return
#Relay Readout
def read_relay_state(self, ch):
"""Reads the relay state.
:param ch: Relay channel. 1 or 2."""
if (ch == 1):
return GPIO.input(self.relay1)
elif (ch == 2):
return GPIO.input(self.relay2)
#Disposal
def __del__(self):
"""Releases the resources."""
if self.is_initialized:
GPIO.output(self.relay1, GPIO.LOW)
GPIO.output(self.relay2, GPIO.LOW)
GPIO.cleanup()
del self.is_initialized
return
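#Illustrative Usage (hedged sketch)
#Assumes a Raspberry Pi with the Turta IoT HAT attached; defined only as an
#example and never called on import.
def _example_relay_usage():
    relays = RelayController()
    relays.set_relay(1, True) #Energize relay channel 1.
    state = relays.read_relay_state(1) #GPIO.HIGH (1) while energized.
    relays.set_relay(1, False) #Release it again.
    return state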
|
Turta-io/IoTHAT
|
DriverSource/Raspbian/Python/Turta_RelayController.py
|
Python
|
mit
| 1,618
|
[
"VisIt"
] |
60d6c2b50fe5e654fe9788fbce367548a29bcf96734845d15b1eac64b2619f32
|
""" Proxy Renewal agent is the key element of the Proxy Repository
which maintains the user proxies alive
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.FrameworkSystem.DB.ProxyDB import ProxyDB
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
class MyProxyRenewalAgent(AgentModule):
def initialize(self):
requiredLifeTime = self.am_getOption("MinimumLifeTime", 3600)
renewedLifeTime = self.am_getOption("RenewedLifeTime", 54000)
self.proxyDB = ProxyDB(useMyProxy=True)
gLogger.info("Minimum Life time : %s" % requiredLifeTime)
gLogger.info("Life time on renew : %s" % renewedLifeTime)
gLogger.info("MyProxy server : %s" % self.proxyDB.getMyProxyServer())
gLogger.info("MyProxy max proxy time : %s" % self.proxyDB.getMyProxyMaxLifeTime())
self.__threadPool = ThreadPool(1, 10)
return S_OK()
def __renewProxyForCredentials(self, userDN, userGroup):
lifeTime = self.am_getOption("RenewedLifeTime", 54000)
gLogger.info("Renewing for %s@%s %s secs" % (userDN, userGroup, lifeTime))
retVal = self.proxyDB.renewFromMyProxy(userDN,
userGroup,
lifeTime=lifeTime)
if not retVal['OK']:
gLogger.error("Failed to renew proxy", "for %s@%s : %s" % (userDN, userGroup, retVal['Message']))
else:
gLogger.info("Renewed proxy for %s@%s" % (userDN, userGroup))
def __treatRenewalCallback(self, oTJ, exceptionList):
gLogger.exception(lException=exceptionList)
def execute(self):
""" The main agent execution method
"""
self.proxyDB.purgeLogs()
gLogger.info("Purging expired requests")
retVal = self.proxyDB.purgeExpiredRequests()
if retVal['OK']:
gLogger.info(" purged %s requests" % retVal['Value'])
gLogger.info("Purging expired proxies")
retVal = self.proxyDB.purgeExpiredProxies()
if retVal['OK']:
gLogger.info(" purged %s proxies" % retVal['Value'])
retVal = self.proxyDB.getCredentialsAboutToExpire(self.am_getOption("MinimumLifeTime", 3600))
if not retVal['OK']:
return retVal
data = retVal['Value']
gLogger.info("Renewing %s proxies..." % len(data))
for record in data:
userDN = record[0]
userGroup = record[1]
self.__threadPool.generateJobAndQueueIt(self.__renewProxyForCredentials,
args=(userDN, userGroup),
oExceptionCallback=self.__treatRenewalCallback)
self.__threadPool.processAllResults()
return S_OK()
|
yujikato/DIRAC
|
src/DIRAC/FrameworkSystem/Agent/MyProxyRenewalAgent.py
|
Python
|
gpl-3.0
| 2,776
|
[
"DIRAC"
] |
7074acd1f48d02f445144fdeb25f35b7016d3864d081bbd3f779ff8847f95d58
|
# $HeadURL: $
''' VOBOXAvailabilityCommand module
'''
import urlparse
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
__RCSID__ = '$Id: $'
class VOBOXAvailabilityCommand( Command ):
'''
Given a URL pointing to a service on a VOBOX, ping it using DIRAC.
'''
def doCommand( self ):
'''
The Command pings a service on a VOBOX; it needs a service URL to ping.
It returns a dict with the following:
{
'serviceUpTime' : <serviceUpTime>,
'machineUpTime' : <machineUpTime>,
'site' : <site>,
'system' : <system>,
'service' : <service>
}
'''
## INPUT PARAMETERS
if not 'serviceURL' in self.args:
return self.returnERROR( S_ERROR( '"serviceURL" not found in self.args' ) )
serviceURL = self.args[ 'serviceURL' ]
##
parsed = urlparse.urlparse( serviceURL )
site = parsed[ 1 ].split( ':' )[ 0 ]
try:
system, service = parsed[ 2 ].strip( '/' ).split( '/' )
except ValueError:
return self.returnERROR( S_ERROR( '"%s" seems to be a malformed url' % serviceURL ) )
pinger = RPCClient( serviceURL )
resPing = pinger.ping()
if not resPing[ 'OK' ]:
return self.returnERROR( resPing )
serviceUpTime = resPing[ 'Value' ].get( 'service uptime', 0 )
machineUpTime = resPing[ 'Value' ].get( 'host uptime', 0 )
result = {
'site' : site,
'system' : system,
'service' : service,
'serviceUpTime' : serviceUpTime,
'machineUpTime' : machineUpTime
}
return S_OK( result )
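# Illustrative sketch (hedged): how doCommand decomposes a service URL into
# site, system and service. The URL below is only a made-up example of the
# expected shape, not a real endpoint; the helper is never called.
def _exampleServiceUrlParsing():
  serviceURL = 'dips://somevobox.example.org:9170/WorkloadManagement/JobMonitoring'
  parsed = urlparse.urlparse( serviceURL )
  site = parsed[ 1 ].split( ':' )[ 0 ]  # 'somevobox.example.org'
  system, service = parsed[ 2 ].strip( '/' ).split( '/' )  # 'WorkloadManagement', 'JobMonitoring'
  return site, system, service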
#FIXME: how do we get the values !!
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
sposs/DIRAC
|
ResourceStatusSystem/Command/VOBOXAvailabilityCommand.py
|
Python
|
gpl-3.0
| 2,271
|
[
"DIRAC"
] |
a7d373dd52a1b97312707c99a93eb73995fde616cd2af2c3c9447a37d87abf41
|
import pandas as pd
import neuroelectro.models as m
__author__ = 'shreejoy'
def assign_error_type_to_data_tables():
'''uses computational method to assign standard deviation error types to each data table and user submission object'''
# for articles with some nedms and a data table
data_tables = m.DataTable.objects.filter(datasource__neuronephysdatamap__isnull = False).distinct()
for dt in data_tables:
nedm_list = dt.datasource_set.all()[0].neuronephysdatamap_set.all()
# use computational method to assign error type based on values
calculated_error_type_bool = identify_stdev(nedm_list) # returns true if stdev
# check existing dt error type and calculated error type
if dt.error_type != 'sd' and calculated_error_type_bool:
print 'assigning error type sd to data table %s' % dt.pk
dt.error_type = 'sd'
dt.save()
# for articles with some nedms and a user submission
user_subs = m.UserSubmission.objects.filter(datasource__neuronephysdatamap__isnull = False).distinct()
for us in user_subs:
nedm_list = us.datasource_set.all()[0].neuronephysdatamap_set.all()
# use computational method to assign error type based on values
calculated_error_type_bool = identify_stdev(nedm_list) # returns true if stdev
# check existing us error type and calculated error type
if us.error_type != 'sd' and calculated_error_type_bool:
print 'assigning error type sd to user submission %s with article pk %s' % (us.pk, us.article.pk)
us.error_type = 'sd'
us.save()
def identify_stdev(nedm_list):
'''A computational method to identify whether error terms on neuron ephys data maps are standard deviations;
relies on the idea that standard deviations are typically large relative to the mean, whereas standard errors are not'''
mean_list = [nedm.val_norm for nedm in nedm_list]
err_list = [nedm.err_norm for nedm in nedm_list]
sd_ratio = .175 # ratio of error to mean above which an SD is assumed
fract_greater = .5 # fraction of nedms that must exceed sd_ratio for the errors to be treated as SDs
df = pd.DataFrame()
df['means'] = mean_list
df['errs'] = err_list
greater_count = sum(df['errs'] / df['means'] > sd_ratio)
total_count = df['errs'].count()
if total_count <= 0:
return False
if float(greater_count) / total_count > fract_greater:
return True
else:
return False
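# Illustrative sketch (hedged): the same ratio heuristic applied to plain lists,
# without the Django models, to show concretely what identify_stdev checks.
# For example, means [10.0, 20.0] with errors [3.0, 6.0] (ratio 0.3 > 0.175 for
# every entry) would be flagged as standard deviations. Never called here.
def _example_ratio_heuristic(mean_list, err_list, sd_ratio=.175, fract_greater=.5):
    df = pd.DataFrame({'means': mean_list, 'errs': err_list})
    greater_count = sum(df['errs'] / df['means'] > sd_ratio)
    total_count = df['errs'].count()
    if total_count <= 0:
        return False
    return float(greater_count) / total_count > fract_greater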
|
neuroelectro/neuroelectro_org
|
db_functions/identify_error_type.py
|
Python
|
gpl-2.0
| 2,519
|
[
"NEURON"
] |
79539852a31b47995ecedaf08c1c17dbd3c4dd19605462cca64c3bc573ef0aef
|
from lightify import Luminary
from Firefly import logging
from Firefly.const import ACTION_LEVEL, AUTHOR, COMMAND_SET_LIGHT, COMMAND_UPDATE, EVENT_ACTION_OFF, EVENT_ACTION_ON, LEVEL, SWITCH, ACTION_ON, ACTION_OFF
from Firefly.helpers.device import COLOR, COLOR_TEMPERATURE
from Firefly.helpers.device_types.light import Light
from Firefly.util.color import Colors, check_ct
TITLE = 'Lightify Device'
COMMANDS = [EVENT_ACTION_ON, EVENT_ACTION_OFF, ACTION_LEVEL, COMMAND_UPDATE, COMMAND_SET_LIGHT]
INITIAL_VALUES = {
'_state': EVENT_ACTION_OFF,
'_manufacturername': 'lightify',
'_on': False,
'_switch': 'off',
'_hue': 0,
'_sat': 0,
'_bri': 0,
'_reachable': False,
'_type': 'unknown',
'_ct': 0,
'_level': 0,
'_transition_time': 20
}
CAPABILITIES = {
LEVEL: True,
SWITCH: True,
COLOR_TEMPERATURE: True,
COLOR: True
}
REQUESTS = [SWITCH, LEVEL]
class LightifyDevice(Light):
def __init__(self, firefly, package, title, author, commands, requests, device_type, **kwargs):
if kwargs.get('initial_values') is not None:
INITIAL_VALUES.update(kwargs['initial_values'])
kwargs.update({
'initial_values': INITIAL_VALUES,
'commands': COMMANDS,
'requests': REQUESTS
})
super().__init__(firefly, package, TITLE, AUTHOR, capabilities=CAPABILITIES, **kwargs)
self.add_command(COMMAND_UPDATE, self.update_lightify)
self.lightify_object: Luminary = kwargs.get('lightify_object')
self.lightify_type = kwargs.get('lightify_type')
self._export_ui = kwargs.get('export_ui', True)
def set_light(self, switch=None, level=None, colors=Colors(), ct=None, **kwargs):
if switch is not None:
self.lightify_object.set_onoff(switch == ACTION_ON)
self.update_values(switch=switch)
if level is not None:
self.lightify_object.set_luminance(level, self._transition_time)
self.update_values(level=level)
if colors.is_set:
self.lightify_object.set_rgb(colors.r, colors.g, colors.b, self._transition_time)
self.update_values(r=colors.r, g=colors.g, b=colors.b)
if ct is not None:
ct = check_ct(ct)
self.lightify_object.set_temperature(ct, self._transition_time)
self.update_values(ct=ct)
def update_lightify(self, **kwargs):
pass
|
Firefly-Automation/Firefly
|
Firefly/components/lightify/lightify_device.py
|
Python
|
apache-2.0
| 2,437
|
[
"Firefly"
] |
847414490e3a1a686b6ea13e69ed3a73a5630e2f8c53f089292f7460eea2ff52
|
from basics import BubbleFinder2D
from basics.utils import sig_clip
from spectral_cube import SpectralCube
import astropy.units as u
import matplotlib.pyplot as p
import scipy.ndimage as nd
from scipy.stats import binned_statistic
import numpy as np
from skimage.segmentation import find_boundaries
from skimage.morphology import medial_axis
from astropy.utils.console import ProgressBar
from corner import hist2d
import os
from os.path import join as osjoin
import seaborn as sb
from aplpy import FITSFigure
from astropy.io import fits
from astropy.table import Table, Column
from paths import (fourteenB_HI_data_wGBT_path,
iram_co21_14B088_data_path,
allfigs_path)
from constants import hi_freq
from galaxy_params import gal_feath as gal
from plotting_styles import (default_figure, twocolumn_figure,
onecolumn_figure,
twocolumn_twopanel_figure)
default_figure()
'''
Calculate the intensities of HI and CO as a function of distance from the
edges of the adaptive-threshold mask.
'''
fig_path = allfigs_path("co_vs_hi")
if not os.path.exists(fig_path):
os.mkdir(fig_path)
np.random.seed(34678953)
# Plot a bunch
verbose = False
# slicer = (slice(825, 1033), slice(360, 692))
slicer = (slice(None), slice(None))
# Load in the rotation subtracted cubes
hi_cube = SpectralCube.read(fourteenB_HI_data_wGBT_path("downsamp_to_co/M33_14B-088_HI.clean.image.GBT_feathered.2.6kms.fits"))
co_cube = SpectralCube.read(iram_co21_14B088_data_path("m33.co21_iram.14B-088_HI.fits"))
co_cube = co_cube.spectral_slab(hi_cube.spectral_extrema[0],
hi_cube.spectral_extrema[1])
# Skip the first 7 channels and the last 15 channels
vels = co_cube.spectral_axis.to(u.km / u.s)[7:-15]
# Get the radius array so we can cut to where the CO data is valid
radii = gal.radius(header=hi_cube[0].header)
max_radius = 6.0 * u.kpc
all_dists = []
all_radii = []
all_vals_hi = []
all_vals_co = []
edge_masks = []
skeleton_dists = []
skeleton_dists_pix = []
skeleton_widths = []
masks = []
hi_beam = hi_cube.beam
# Estimate the noise level in an equivalent slab
hi_mom0 = hi_cube[-1]
sigma = sig_clip(hi_mom0.value, nsig=10) * \
hi_beam.jtok(hi_freq).value
# Skip the first 7 channels
i_offset = 7
for i, vel in enumerate(ProgressBar(vels)):
hi_chan = hi_cube[i + i_offset] * hi_beam.jtok(hi_freq) / u.Jy
co_chan = co_cube[i + i_offset]
# Need a mask from the HI
# Adjust the sigma in a single channel to the moment0 in the slab
# sigma = 0.00152659 * hi_slab.shape[0] * \
# np.abs((hi_slab.spectral_axis[1] - hi_slab.spectral_axis[0]).value)
bub = BubbleFinder2D(hi_chan, auto_cut=False, sigma=sigma)
bub.create_mask(bkg_nsig=5, region_min_nsig=10, mask_clear_border=False)
skeleton, dists = medial_axis(~bub.mask, return_distance=True)
skeleton_dists.append(skeleton * dists)
edge_mask = find_boundaries(bub.mask, connectivity=2, mode='outer')
hole_mask = bub.mask.copy()
# Now apply a radial boundary to the edge mask where the CO data is valid
# This is the same cut-off used to define the valid clouds
radial_cut = radii <= max_radius
edge_mask *= radial_cut
edge_masks.append(edge_mask)
dist_trans = nd.distance_transform_edt(~edge_mask)
# Assign negative values to regions within holes.
dist_trans[hole_mask] = -dist_trans[hole_mask]
# hist = p.hist(co_mom0.value[np.isfinite(co_mom0.value)], bins=100)
# p.draw()
# raw_input("?")
# p.clf()
# print(np.nanmin(co_mom0.value))
# print(np.nanmax(co_mom0.value[radial_cut]))
# print(np.nanmin(co_mom0.value[radial_cut]))
all_dists.extend(list(dist_trans[radial_cut]))
all_radii.extend(list(radii.value[radial_cut]))
all_vals_hi.extend(list(hi_chan.value[radial_cut]))
all_vals_co.extend(list(co_chan.value[radial_cut]))
# Track the width of the mask
skeleton_widths.extend(list((skeleton * dists)[radial_cut]))
# Also record all of the distances from the centre of the skeletons
skeleton_dists_pix.extend(list(nd.distance_transform_edt(~skeleton)[radial_cut]))
masks.append(~bub.mask)
if verbose:
print("Velocity: {}".format(vel))
fig, ax = p.subplots(1, 2, sharex=True, sharey=True,
subplot_kw=dict(projection=hi_chan.wcs))
ax[0].imshow(np.arctan(hi_chan[slicer].value /
np.nanpercentile(hi_chan.value, 85)),
origin='lower', vmin=0)
# p.contour(skeleton, colors='b')
ax[0].contour(edge_mask[slicer], colors='g')
# p.imshow(hole_mask, origin='lower')
ax[1].imshow(co_chan.value[slicer],
origin='lower', vmin=0.01, vmax=0.1)
# p.imshow(co_chan.value, origin='lower')
# p.contour(skeleton, colors='b')
ax[1].contour(edge_mask[slicer], colors='g')
lat = ax[1].coords[1]
lat.set_ticklabel_visible(False)
p.draw()
raw_input("Next plot?")
p.clf()
all_dists = np.array(all_dists)
all_radii = np.array(all_radii)
all_vals_hi = np.array(all_vals_hi)
all_vals_co = np.array(all_vals_co)
skeleton_widths = np.array(skeleton_widths)
skeleton_dists_pix = np.array(skeleton_dists_pix)
# Make a figure from one of the channels to highlight the mask shape
twocolumn_twopanel_figure()
mpl_fig = p.figure()
spatial_slice = (slice(720, 900), slice(350, 750))
fig = FITSFigure((hi_cube[47][spatial_slice] * hi_beam.jtok(hi_freq).value).hdu,
figure=mpl_fig)
fig.show_grayscale(invert=True, vmin=None, vmax=80, stretch='sqrt')
fig.add_colorbar()
fig.colorbar.set_axis_label_text("HI Intensity (K)")
fig.show_contour(co_cube[47][spatial_slice].hdu, cmap='autumn',
levels=[0.05, 0.1, 0.2, 0.3])
fig.show_contour(fits.PrimaryHDU(masks[40].astype(int), hi_cube[0].header),
colors=[sb.color_palette()[-1]], levels=[0.5])
fig.hide_axis_labels()
p.tight_layout()
fig.savefig(osjoin(fig_path, "mask_edge_img_vel_minus196.pdf"))
fig.savefig(osjoin(fig_path, "mask_edge_img_vel_minus196.png"))
fig.close()
# Make a version with the skeleton instead of the mask
mpl_fig = p.figure()
spatial_slice = (slice(720, 900), slice(350, 750))
fig = FITSFigure((hi_cube[47][spatial_slice] * hi_beam.jtok(hi_freq).value).hdu,
figure=mpl_fig)
fig.show_grayscale(invert=True, vmin=None, vmax=80, stretch='sqrt')
fig.add_colorbar()
fig.colorbar.set_axis_label_text("HI Intensity (K)")
fig.show_contour(fits.PrimaryHDU(medial_axis(masks[40]).astype(int),
hi_cube[0].header),
colors=[sb.color_palette()[-1]], levels=[0.5])
fig.show_contour(co_cube[47][spatial_slice].hdu, cmap='autumn',
levels=[0.05, 0.1, 0.2, 0.3])
fig.hide_axis_labels()
p.tight_layout()
fig.savefig(osjoin(fig_path, "mask_skel_img_vel_minus196.pdf"))
fig.savefig(osjoin(fig_path, "mask_skel_img_vel_minus196.png"))
fig.close()
# Now bin all of the distances against the HI and CO intensities.
bins = np.arange(-30, 30, 1)
hi_vals, bin_edges, bin_num = \
binned_statistic(all_dists, all_vals_hi,
bins=bins,
statistic=np.mean)
co_vals, bin_edges, bin_num = \
binned_statistic(all_dists, all_vals_co,
bins=bins,
statistic=np.mean)
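# Note: binned_statistic fills empty bins with NaN (np.mean of an empty slice),
# so also count the number of points per bin and impose a minimum sample size below.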
binned_elements = \
binned_statistic(all_dists, np.ones_like(all_dists), bins=bins,
statistic=np.sum)[0]
# Require that there be 100 points in each bin
bin_cutoff = binned_elements >= 100
bin_width = (bin_edges[1] - bin_edges[0])
bin_centers = bin_edges[1:] - bin_width / 2
bin_centers = bin_centers[bin_cutoff]
co_vals = co_vals[bin_cutoff]
hi_vals = hi_vals[bin_cutoff]
# Let's bootstrap to get errors in the distance bins
niters = 100
hi_samps = np.zeros((niters, len(bin_centers)))
co_samps = np.zeros((niters, len(bin_centers)))
print("Bootstrapping")
for i in ProgressBar(niters):
hi_samps[i] = \
binned_statistic(all_dists, np.random.permutation(all_vals_hi),
bins=bins,
statistic=np.mean)[0][bin_cutoff]
co_samps[i] = \
binned_statistic(all_dists, np.random.permutation(all_vals_co),
bins=bins,
statistic=np.mean)[0][bin_cutoff]
# Take the stds in the distribution for each bin
hi_errs = np.nanstd(hi_samps, axis=0)
co_errs = np.nanstd(co_samps, axis=0)
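# Permuting the intensities relative to the distances destroys any real
# correlation, so the scatter of these permuted bin means provides an
# empirical error estimate for each distance bin.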
# Convert the bin_centers to pc
pixscale = \
hi_mom0.header['CDELT2'] * (np.pi / 180.) * gal.distance.to(u.pc).value
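# i.e. pc per pixel = pixel size [deg] * (pi / 180) [rad per deg] * distance [pc],
# using the small-angle approximation.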
bin_centers *= pixscale
onecolumn_figure()
cpal = sb.color_palette()
ax = p.subplot(111)
pl1 = ax.errorbar(bin_centers, hi_vals,
yerr=hi_errs, fmt="D-", color=cpal[0],
label="HI", drawstyle='steps-mid')
ax_2 = ax.twinx()
pl2 = ax_2.errorbar(bin_centers, co_vals * 1000.,
yerr=co_errs, fmt="o--",
color=cpal[1],
label="CO(2-1)", drawstyle='steps-mid')
pls = [pl1[0], pl2[0]]
labs = ["HI", "CO(2-1)"]
ax.legend(pls, labs, frameon=True)
ax.set_xlabel("Distance from Mask Edge (pc)")
ax.set_ylabel(r"HI Mean Intensity (K)")
ax_2.set_ylabel(r"CO Mean Intensity (mK)")
ax.axvline(0.0, color='k')
ax.grid()
p.tight_layout()
p.savefig(osjoin(fig_path, "mask_edge_radial_profiles.pdf"))
p.savefig(osjoin(fig_path, "mask_edge_radial_profiles.png"))
p.close()
# Show the total number of elements in each distance bin
p.semilogy(pixscale * (bin_edges[1:] - bin_width / 2),
binned_elements, 'D-')
p.axhline(100, linestyle='--', color=sb.color_palette()[1])
p.xlabel("Distance from mask edge (pc)")
p.ylabel("Number of pixels")
p.grid()
p.tight_layout()
p.savefig(osjoin(fig_path, "mask_edge_radial_profiles_numbin.pdf"))
p.savefig(osjoin(fig_path, "mask_edge_radial_profiles_numbin.png"))
p.close()
# Now investigate the significance of the distance correlations.
# Randomize the order of the CO and HI intensities.
# hi_rand_vals = \
# binned_statistic(all_dists, np.random.permutation(all_vals_hi),
# bins=bins,
# statistic=np.nanmean)[0]
# co_rand_vals = \
# binned_statistic(all_dists, np.random.permutation(all_vals_co),
# bins=bins,
# statistic=np.nanmean)[0]
# p.plot(bin_centers, hi_rand_vals / np.nanmax(hi_rand_vals), 'bD-',
# label="HI")
# p.plot(bin_centers, co_rand_vals / np.nanmax(co_rand_vals), 'ro-',
# label="CO(2-1)")
# # p.xlim([0.0, 200])
# p.ylim([0.0, 1.1])
# p.xlabel("Distance from mask edge (pc)")
# p.ylabel("Normalized Intensity")
# p.legend(loc='upper left')
# p.grid()
# p.draw()
# p.savefig(paper1_figures_path("mask_edge_radial_profiles_randbin.pdf"))
# p.savefig(paper1_figures_path("mask_edge_radial_profiles_randbin.png"))
# p.close()
# Compare the CDFs of the intensities within the masks to demonstrate CO
# is not colocated with all of the HI
pos_hi = all_vals_hi[all_dists > 0]
pos_co = all_vals_co[all_dists > 0]
onecolumn_figure()
p.plot(np.sort(pos_hi), np.cumsum(np.sort(pos_hi)) / np.sum(pos_hi), "-",
label="HI")
p.plot(np.sort(pos_hi), np.cumsum(pos_co[np.argsort(pos_hi)]) / np.sum(pos_co),
"--", label="CO")
p.legend(loc='upper left', frameon=True)
p.grid()
p.ylim([-0.05, 1.05])
p.ylabel("CDF")
p.xlabel("HI Intensity (K)")
p.tight_layout()
p.savefig(osjoin(fig_path, "inmask_hi_co_cdfs.pdf"))
p.savefig(osjoin(fig_path, "inmask_hi_co_cdfs.png"))
p.close()
# Perform the same analysis split up into radial bins
dr = 500 * u.pc
max_radius = max_radius.to(u.pc)
nbins = np.int(np.floor(max_radius / dr))
inneredge = np.linspace(0, max_radius - dr, nbins)
outeredge = np.linspace(dr, max_radius, nbins)
Nrows = 4
Ncols = 3
twocolumn_figure()
p.figure(1, figsize=(8.4, 11)).clf()
fig, ax = p.subplots(Nrows, Ncols,
sharex=True,
sharey=True, num=1)
fig.text(0.5, 0.04, 'Distance from mask edge (pc)', ha='center')
fig.text(0.04, 0.5, 'Normalized Intensity', va='center', rotation='vertical')
p.subplots_adjust(hspace=0.1,
wspace=0.1)
hi_vals_rad = []
hi_errs_rad = []
co_vals_rad = []
co_errs_rad = []
for ctr, (r0, r1) in enumerate(zip(inneredge,
outeredge)):
r, c = np.unravel_index(ctr, (Nrows, Ncols))
idx = np.logical_and(all_radii >= r0.value,
all_radii < r1.value)
hi_vals_bin, bin_edges, bin_num = \
binned_statistic(all_dists[idx], all_vals_hi[idx],
bins=bins,
statistic=np.mean)
co_vals_bin, bin_edges, bin_num = \
binned_statistic(all_dists[idx], all_vals_co[idx],
bins=bins,
statistic=np.mean)
binned_elements = \
binned_statistic(all_dists[idx], np.ones_like(all_dists)[idx],
bins=bins,
statistic=np.sum)[0]
bin_cutoff = binned_elements >= 30
bin_width = (bin_edges[1] - bin_edges[0])
bin_centers = bin_edges[1:] - bin_width / 2
bin_centers = bin_centers[bin_cutoff]
co_vals_bin = co_vals_bin[bin_cutoff]
hi_vals_bin = hi_vals_bin[bin_cutoff]
hi_samps = np.zeros((niters, len(bin_centers)))
co_samps = np.zeros((niters, len(bin_centers)))
print("Bootstrapping")
for i in ProgressBar(niters):
hi_samps[i] = \
binned_statistic(all_dists[idx],
np.random.permutation(all_vals_hi[idx]),
bins=bins,
statistic=np.mean)[0][bin_cutoff]
co_samps[i] = \
binned_statistic(all_dists[idx],
np.random.permutation(all_vals_co[idx]),
bins=bins,
statistic=np.mean)[0][bin_cutoff]
# Take the stds in the distribution for each bin
hi_errs_bin = np.nanstd(hi_samps, axis=0)
co_errs_bin = np.nanstd(co_samps, axis=0)
hi_vals_rad.append(hi_vals_bin)
hi_errs_rad.append(hi_errs_bin)
co_vals_rad.append(co_vals_bin)
co_errs_rad.append(co_errs_bin)
ax[r, c].errorbar(bin_centers * pixscale,
hi_vals_bin / np.nanmax(hi_vals_bin),
yerr=hi_errs_bin / np.nanmax(hi_vals_bin),
fmt="D-", drawstyle='steps-mid',
label="HI")
ax[r, c].errorbar(bin_centers * pixscale,
co_vals_bin / np.nanmax(co_vals_bin),
yerr=co_errs_bin / np.nanmax(co_vals_bin),
fmt="o--", drawstyle='steps-mid',
label="CO(2-1)")
ax[r, c].annotate("{0} to {1}".format(r0.to(u.kpc).value, r1.to(u.kpc)),
xy=(-360, -0.3),
color='k',
fontsize=11,
bbox={"boxstyle": "square", "facecolor": "w"})
# ax[r, c].set_ylim([0.0, 1.1])
# ax[r, c].set_xlim([-400, 220])
# ax[r, c].set_xlabel("Distance from mask edge (pc)")
# ax[r, c].set_ylabel("Normalized Intensity")
# p.title("Radii {} to {}".format(r0, r1))
ax[r, c].axvline(0.0, color='k')
if ctr == 0:
ax[r, c].legend(loc='upper left', fontsize=11, frameon=True)
ax[r, c].grid()
ax[r, c].set_ylim([-0.4, 1.1])
# for r in range(Nrows):
# for c in range(Ncols):
# if r == Nrows - 1:
# ax[r, c].set_xticklabels(ax[r, c].xaxis.get_majorticklabels(),
# rotation=45)
fig.savefig(osjoin(fig_path, "mask_edge_radial_profiles_byradius.pdf"))
fig.savefig(osjoin(fig_path, "mask_edge_radial_profiles_byradius.png"))
p.close()
onecolumn_figure()
# Is the variation being driven by a change in the width of the regions?
bins = np.arange(0, 6.5, 0.5) * 1000
dists, bin_edges, bin_num = \
binned_statistic(all_radii[skeleton_widths > 0],
skeleton_widths[skeleton_widths > 0],
bins=bins, statistic=np.mean)
dist_std = \
binned_statistic(all_radii[skeleton_widths > 0],
skeleton_widths[skeleton_widths > 0],
bins=bins, statistic=np.std)[0]
bin_width = (bin_edges[1] - bin_edges[0])
bin_centers = bin_edges[1:] - bin_width / 2
ang_conv = (hi_mom0.header["CDELT2"] * u.deg).to(u.arcsec)
phys_conv = ang_conv.to(u.rad).value * 840e3 * u.pc
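# phys_conv is the physical size of one pixel, computed here with a hard-coded
# 840 kpc distance rather than gal.distance as used for pixscale above.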
p.plot(all_radii[skeleton_widths > 0] / 1000.,
skeleton_widths[skeleton_widths > 0] * phys_conv.value,
'ko', alpha=0.1, ms=3.0, zorder=-1)
p.errorbar(bin_centers / 1000., dists * phys_conv.value,
yerr=dist_std * phys_conv.value,
marker='', linestyle='-', linewidth=2, elinewidth=2)
p.ylabel("Width of mask regions (pc)")
p.xlabel("Radius (kpc)")
p.tight_layout()
p.savefig(osjoin(fig_path, "mask_width_byradius.pdf"))
p.savefig(osjoin(fig_path, "mask_width_byradius.png"))
p.close()
# Let's take some other views of this data, while we're at it.
twocolumn_twopanel_figure()
fig, ax = p.subplots(1, 2, sharex=True)
hist2d(skeleton_widths[skeleton_widths > 0] * phys_conv.value,
all_vals_co[skeleton_widths > 0], bins=10,
ax=ax[1], data_kwargs={"alpha": 0.6})
ax[1].set_xlabel("Mask Width (pc)")
ax[1].set_ylabel(r"CO Intensity (K)")
ax[1].grid()
hist2d(skeleton_widths[skeleton_widths > 0] * phys_conv.value,
all_vals_hi[skeleton_widths > 0], bins=10,
ax=ax[0], data_kwargs={"alpha": 0.6})
ax[0].set_xlabel("Mask Width (pc)")
ax[0].set_ylabel(r"HI Intensity (K)")
ax[0].grid()
p.tight_layout()
p.savefig(osjoin(fig_path, "mask_widthvsintensity.pdf"))
p.savefig(osjoin(fig_path, "mask_widthvsintensity.png"))
p.close()
# HI vs. CO with all skeleton distances (not just on the skeleton like above)
bins = np.arange(0, 36, 1)
selector_pts = np.logical_and(all_vals_co > 0,
np.logical_and(all_vals_hi > 0,
skeleton_dists_pix < 35))
hi_mean, bin_edges, bin_num = \
binned_statistic(skeleton_dists_pix[selector_pts],
all_vals_hi[selector_pts],
bins=bins, statistic=np.mean)
hi_std = \
binned_statistic(skeleton_dists_pix[selector_pts],
all_vals_hi[selector_pts],
bins=bins, statistic=np.std)[0]
co_mean = \
binned_statistic(skeleton_dists_pix[selector_pts],
all_vals_co[selector_pts],
bins=bins, statistic=np.mean)[0]
co_std = \
binned_statistic(skeleton_dists_pix[selector_pts],
all_vals_co[selector_pts],
bins=bins, statistic=np.std)[0]
bin_width = (bin_edges[1] - bin_edges[0])
bin_centers = bin_edges[1:] - bin_width / 2
num_in_bins = np.bincount(bin_num)[1:]
# Num. indep't points divided by number of pixels in one beam.
num_indept = num_in_bins / 41.
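# The error bars below use std / sqrt(num_indept), i.e. the standard error after
# correcting for the ~41 pixels that are correlated within one beam.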
twocolumn_twopanel_figure()
fig, ax = p.subplots(1, 2, sharex=True)
ax[1].plot(skeleton_dists_pix[selector_pts] * phys_conv.value,
all_vals_co[selector_pts], 'ko', ms=2.0, alpha=0.6,
rasterized=True, zorder=-1)
ax[1].set_xlabel("Distance from Mask Centre (pc)")
ax[1].set_ylabel(r"CO Intensity (K)")
ax[1].grid()
ax[0].plot(skeleton_dists_pix[selector_pts] * phys_conv.value,
all_vals_hi[selector_pts], 'ko', ms=2.0, alpha=0.6,
rasterized=True, zorder=-1)
ax[0].set_xlabel("Distance from Mask Centre (pc)")
ax[0].set_ylabel(r"HI Intensity (K)")
ax[0].grid()
p.tight_layout()
p.savefig(osjoin(fig_path, "mask_intensity_vs_skeldist.pdf"))
p.savefig(osjoin(fig_path, "mask_intensity_vs_skeldist.png"))
p.close()
# Find the HWHM points for the radial profiles
def find_perc_width(x, y, level=0.5):
'''
Return the position at which the profile falls to `level` of the
peak-to-background range above the background (the innermost crossing if
there are several); level=0.5 gives the HWHM position.
'''
from scipy.interpolate import InterpolatedUnivariateSpline
assert (level > 0.) & (level < 1.)
# Assume that the profile peaks at the centre and monotonically
# decreases. This is true for our comparisons here.
peak = y.max()
bkg = np.mean(y[-5:])
halfmax = (peak - bkg) * level
# Model the profile with a spline
# x values must be increasing for the spline (true for the binned profiles used here).
interp1 = InterpolatedUnivariateSpline(x, y - halfmax - bkg, k=3)
hwhm_points = interp1.roots()
if len(hwhm_points) < 1:
raise ValueError("Didn't find HWHM!")
elif len(hwhm_points) > 1:
hwhm_points = [min(hwhm_points)]
return hwhm_points[0]
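# Quick illustrative check of find_perc_width (a sketch, not used elsewhere in
# the analysis): for a Gaussian profile with sigma = 2 the level=0.5 crossing
# should land near sigma * sqrt(2 ln 2) ~= 2.35. Only numpy is needed, which is
# already imported above.
_x_demo = np.linspace(0., 10., 200)
_y_demo = np.exp(-0.5 * (_x_demo / 2.) ** 2)
print("find_perc_width check (expect ~2.35): {}".format(find_perc_width(_x_demo, _y_demo, level=0.5)))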
co_hwhm = find_perc_width(bin_centers, co_mean) * phys_conv.value
hi_hwhm = find_perc_width(bin_centers, hi_mean) * phys_conv.value
# Widths at the 25 and 75th percentiles between peak and bkg.
co_lowq = find_perc_width(bin_centers, co_mean, level=0.25) * phys_conv.value
hi_lowq = find_perc_width(bin_centers, hi_mean, level=0.25) * phys_conv.value
co_highq = find_perc_width(bin_centers, co_mean, level=0.75) * phys_conv.value
hi_highq = find_perc_width(bin_centers, hi_mean, level=0.75) * phys_conv.value
print(co_hwhm, hi_hwhm)
# (67.480769229465864, 100.43889752239386)
print(co_lowq, hi_lowq)
# (103.06346550608011, 169.52993127884227)
print(co_highq, hi_highq)
# (40.128517012921016, 56.468698633140313)
onecolumn_figure()
col_pal = sb.color_palette()
ax = p.subplot(111)
ax.axvline(hi_hwhm, linestyle='-', color=col_pal[0],
linewidth=3, alpha=0.6)
ax.axvline(co_hwhm, linestyle='--', color=col_pal[1],
linewidth=3, alpha=0.6)
pl1 = ax.errorbar(bin_centers * phys_conv.value, hi_mean,
yerr=hi_std / np.sqrt(num_indept), color=col_pal[0],
marker='D', linestyle='-', linewidth=2, elinewidth=2,
label='HI')
ax.set_xlabel("Distance from Skeleton (pc)")
ax.set_ylabel(r"HI Mean Intensity (K)")
ax_2 = ax.twinx()
pl2 = ax_2.errorbar(bin_centers * phys_conv.value, co_mean,
yerr=co_std / np.sqrt(num_indept), color=col_pal[1],
marker='o', linestyle='--', linewidth=2, elinewidth=2,
label='CO')
ax_2.set_ylabel(r"CO Mean Intensity (K)")
pls = [pl1[0], pl2[0]]
labs = ['HI', 'CO']
ax.legend(pls, labs, frameon=True)
ax.grid()
p.tight_layout()
p.savefig(osjoin(fig_path, "mask_intensity_vs_skeldist_mean.pdf"))
p.savefig(osjoin(fig_path, "mask_intensity_vs_skeldist_mean.png"))
p.close()
# Split the profiles by radius
hi_hwhms = []
co_hwhms = []
hi_lowqs = []
co_lowqs = []
hi_highqs = []
co_highqs = []
twocolumn_figure()
p.figure(1, figsize=(8.4, 11)).clf()
fig, ax = p.subplots(Nrows, Ncols,
sharex=True,
sharey=True, num=1)
fig.text(0.5, 0.04, "Distance from Skeleton (pc)", ha='center')
fig.text(0.04, 0.5, r"HI Mean Intensity (K)", va='center', rotation='vertical')
fig.text(0.96, 0.5, r"CO Mean Intensity (K)", va='center', rotation='vertical')
p.subplots_adjust(hspace=0.1,
wspace=0.1)
for ctr, (r0, r1) in enumerate(zip(inneredge,
outeredge)):
r, c = np.unravel_index(ctr, (Nrows, Ncols))
idx = np.logical_and(all_radii >= r0.value,
all_radii < r1.value)
idx = np.logical_and(selector_pts, idx)
hi_mean_rad, bin_edges, bin_num = \
binned_statistic(skeleton_dists_pix[idx],
all_vals_hi[idx],
bins=bins, statistic=np.mean)
hi_std_rad = \
binned_statistic(skeleton_dists_pix[idx],
all_vals_hi[idx],
bins=bins, statistic=np.std)[0]
co_mean_rad = \
binned_statistic(skeleton_dists_pix[idx],
all_vals_co[idx],
bins=bins, statistic=np.mean)[0]
co_std_rad = \
binned_statistic(skeleton_dists_pix[idx],
all_vals_co[idx],
bins=bins, statistic=np.std)[0]
num_in_bins_rad = np.bincount(bin_num)[1:]
# Num. indep't points divided by number of pixels in one beam.
num_indept_rad = num_in_bins_rad / 41.
bin_width = (bin_edges[1] - bin_edges[0])
bin_centers = bin_edges[1:] - bin_width / 2
co_hwhm_rad = find_perc_width(bin_centers, co_mean_rad) * phys_conv.value
hi_hwhm_rad = find_perc_width(bin_centers, hi_mean_rad) * phys_conv.value
co_lowq_rad = find_perc_width(bin_centers, co_mean_rad,
level=0.25) * phys_conv.value
hi_lowq_rad = find_perc_width(bin_centers, hi_mean_rad,
level=0.25) * phys_conv.value
co_highq_rad = find_perc_width(bin_centers, co_mean_rad,
level=0.75) * phys_conv.value
hi_highq_rad = find_perc_width(bin_centers, hi_mean_rad,
level=0.75) * phys_conv.value
co_hwhms.append(co_hwhm_rad)
hi_hwhms.append(hi_hwhm_rad)
co_lowqs.append(co_lowq_rad)
hi_lowqs.append(hi_lowq_rad)
co_highqs.append(co_highq_rad)
hi_highqs.append(hi_highq_rad)
ax[r, c].axvline(hi_hwhm_rad, linestyle='-', color=col_pal[0],
linewidth=3, alpha=0.6)
ax[r, c].axvline(co_hwhm_rad, linestyle='--', color=col_pal[1],
linewidth=3, alpha=0.6)
pl1 = ax[r, c].errorbar(bin_centers * pixscale,
hi_mean_rad,
yerr=hi_std_rad / np.sqrt(num_indept_rad),
fmt="D-", drawstyle='steps-mid',
label="HI")
ax2 = ax[r, c].twinx()
if c == 0:
ax_twin = ax2
else:
ax2.get_shared_y_axes().join(ax2, ax_twin)
if c < Ncols - 1:
ax2.tick_params(labelright='off')
pl2 = ax2.errorbar(bin_centers * pixscale,
co_mean_rad,
yerr=co_std_rad / np.sqrt(num_indept_rad),
fmt="o--", drawstyle='steps-mid',
label="CO(2-1)", color=col_pal[1])
ax[r, c].annotate("{0} to {1}".format(r0.to(u.kpc).value, r1.to(u.kpc)),
xy=(170, 25),
color='k',
fontsize=11,
bbox={"boxstyle": "square", "facecolor": "w"})
ax[r, c].axvline(0.0, color='k')
if ctr == 0:
pls = [pl1[0], pl2[0]]
labs = ["HI", "CO"]
ax[r, c].legend(pls, labs, loc='center right', fontsize=11,
frameon=True)
ax[r, c].grid()
fig.savefig(osjoin(fig_path, "mask_intensity_vs_skeldist_mean_byradius.pdf"))
fig.savefig(osjoin(fig_path, "mask_intensity_vs_skeldist_mean_byradius.png"))
p.close()
# Look at the HWHM with radius
onecolumn_figure()
p.plot(inneredge.value / 1000. + 0.25, hi_lowqs, label='HI 25\%',
drawstyle='steps-mid', color=col_pal[0], linestyle='--')
p.plot(inneredge.value / 1000. + 0.25, co_lowqs, label='CO 25\%',
drawstyle='steps-mid', color=col_pal[1], linestyle='--')
p.plot(inneredge.value / 1000. + 0.25, hi_hwhms, label='HI 50\%',
drawstyle='steps-mid', color=col_pal[0], linewidth=3)
p.plot(inneredge.value / 1000. + 0.25, co_hwhms, label='CO 50\%',
drawstyle='steps-mid', color=col_pal[1], linewidth=3)
p.plot(inneredge.value / 1000. + 0.25, hi_highqs, label='HI 75\%',
drawstyle='steps-mid', color=col_pal[0], linestyle='-.')
p.plot(inneredge.value / 1000. + 0.25, co_highqs, label='CO 75\%',
drawstyle='steps-mid', color=col_pal[1], linestyle='-.')
p.grid()
p.xlim([-0.2, 8.5])
p.legend(frameon=True, loc='center right')
p.ylabel("Width (pc)")
p.xlabel("Galactocentric Radius (kpc)")
p.tight_layout()
p.savefig(osjoin(fig_path, "mask_intensity_vs_skeldist_hwhm.pdf"))
p.savefig(osjoin(fig_path, "mask_intensity_vs_skeldist_hwhm.png"))
p.close()
# Save the table of HWHMs
radbin_centers = Column((inneredge.value + 250) / 1000., unit=u.kpc,
name='bin_cent')
hi_hwhms = Column(hi_hwhms, unit=u.pc, name='hi_hwhm')
co_hwhms = Column(co_hwhms, unit=u.pc, name='co_hwhm')
hi_lowqs = Column(hi_lowqs, unit=u.pc, name='hi_25')
co_lowqs = Column(co_lowqs, unit=u.pc, name='co_25')
hi_highqs = Column(hi_highqs, unit=u.pc, name='hi_75')
co_highqs = Column(co_highqs, unit=u.pc, name='co_75')
tab = Table([radbin_centers, hi_hwhms, co_hwhms, hi_lowqs, co_lowqs, hi_highqs, co_highqs])
tab.write(fourteenB_HI_data_wGBT_path("tables/skeleton_profile_radial_hwhm.fits", no_check=True))
default_figure()
|
e-koch/VLA_Lband
|
14B-088/HI/analysis/co_comparison/co_vs_hi_boundaries.py
|
Python
|
mit
| 28,598
|
[
"Gaussian"
] |
4116dacdc6ea09b41d55847ace5eabc414eb3c8d182798bd5d02297a0604a2f2
|
########################################################################
# $Id$
########################################################################
""" A set of common tools to be used in pilot commands
"""
import sys
import time
import os
import pickle
import getopt
import imp
import types
import urllib2
import signal
__RCSID__ = '$Id$'
def printVersion( log ):
log.info( "Running %s" % " ".join( sys.argv ) )
try:
fd = open( "%s.run" % sys.argv[0], "w" )
pickle.dump( sys.argv[1:], fd )
fd.close()
except:
pass
log.info( "Version %s" % __RCSID__ )
def pythonPathCheck():
try:
os.umask( 18 ) # 022
pythonpath = os.getenv( 'PYTHONPATH', '' ).split( ':' )
print 'Directories in PYTHONPATH:', pythonpath
for p in pythonpath:
if p == '': continue
try:
if os.path.normpath( p ) in sys.path:
# In case a given directory is twice in PYTHONPATH it has to removed only once
sys.path.remove( os.path.normpath( p ) )
except Exception, x:
print x
print "[EXCEPTION-info] Failing path:", p, os.path.normpath( p )
print "[EXCEPTION-info] sys.path:", sys.path
raise x
except Exception, x:
print x
print "[EXCEPTION-info] sys.executable:", sys.executable
print "[EXCEPTION-info] sys.version:", sys.version
print "[EXCEPTION-info] os.uname():", os.uname()
raise x
def alarmTimeoutHandler( *args ):
raise Exception( 'Timeout' )
def retrieveUrlTimeout( url, fileName, log, timeout = 0 ):
"""
Retrieve remote url to local file, with timeout wrapper
"""
urlData = ''
if timeout:
signal.signal( signal.SIGALRM, alarmTimeoutHandler )
# set timeout alarm
signal.alarm( timeout + 5 )
try:
remoteFD = urllib2.urlopen( url )
expectedBytes = 0
# Sometimes repositories do not return Content-Length parameter
try:
expectedBytes = long( remoteFD.info()[ 'Content-Length' ] )
except Exception, x:
expectedBytes = 0
data = remoteFD.read()
if fileName:
localFD = open( fileName + '-local', "wb" )
localFD.write( data )
localFD.close()
else:
urlData += data
remoteFD.close()
if len( data ) != expectedBytes and expectedBytes > 0:
log.error( 'URL retrieve: expected size does not match the received one' )
return False
if timeout:
signal.alarm( 0 )
if fileName:
return True
else:
return urlData
except urllib2.HTTPError, x:
if x.code == 404:
log.error( "URL retrieve: %s does not exist" % url )
if timeout:
signal.alarm( 0 )
return False
except urllib2.URLError:
log.error( 'Timeout after %s seconds on transfer request for "%s"' % ( str( timeout ), url ) )
return False
except Exception, x:
if x == 'Timeout':
log.error( 'Timeout after %s seconds on transfer request for "%s"' % ( str( timeout ), url ) )
if timeout:
signal.alarm( 0 )
raise x
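# Usage sketch (illustrative; the URL is a placeholder): given a Logger
# instance `log`, fetch a remote file with a 30 second alarm-based timeout,
# writing it locally as 'pilot.json-local' and returning True on success:
# retrieveUrlTimeout( 'http://example.org/pilot.json', 'pilot.json', log, timeout = 30 )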
class ObjectLoader( object ):
""" Simplified class for loading objects from a DIRAC installation.
Example:
ol = ObjectLoader()
object, modulePath = ol.loadObject( 'pilot', 'LaunchAgent' )
"""
def __init__( self, baseModules, log ):
""" init
"""
self.__rootModules = baseModules
self.log = log
def loadModule( self, modName, hideExceptions = False ):
""" Auto search which root module has to be used
"""
for rootModule in self.__rootModules:
impName = modName
if rootModule:
impName = "%s.%s" % ( rootModule, impName )
self.log.debug( "Trying to load %s" % impName )
module, parentPath = self.__recurseImport( impName, hideExceptions = hideExceptions )
#Error. Something cannot be imported. Return error
if module is None:
return None, None
#Huge success!
else:
return module, parentPath
#Nothing found, continue
#Return nothing found
return None, None
def __recurseImport( self, modName, parentModule = None, hideExceptions = False ):
""" Internal function to load modules
"""
if type( modName ) in types.StringTypes:
modName = modName.split( '.' )
try:
if parentModule:
impData = imp.find_module( modName[0], parentModule.__path__ )
else:
impData = imp.find_module( modName[0] )
impModule = imp.load_module( modName[0], *impData )
if impData[0]:
impData[0].close()
except ImportError, excp:
if str( excp ).find( "No module named %s" % modName[0] ) == 0:
return None, None
errMsg = "Can't load %s in %s" % ( ".".join( modName ), parentModule.__path__[0] )
if not hideExceptions:
self.log.exception( errMsg )
return None, None
if len( modName ) == 1:
return impModule, parentModule.__path__[0]
return self.__recurseImport( modName[1:], impModule,
hideExceptions = hideExceptions )
def loadObject( self, package, moduleName, command ):
""" Load an object from inside a module
"""
loadModuleName = '%s.%s' % ( package, moduleName )
module, parentPath = self.loadModule( loadModuleName )
if module is None:
return None, None
try:
commandObj = getattr( module, command )
return commandObj, os.path.join( parentPath, moduleName )
except AttributeError, e:
self.log.error( 'Exception: %s' % str(e) )
return None, None
def getCommand( params, commandName, log ):
""" Get an instantiated command object for execution.
Commands are looked in the following modules in the order:
1. <CommandExtension>Commands
2. pilotCommands
3. <Extension>.WorkloadManagementSystem.PilotAgent.<CommandExtension>Commands
4. <Extension>.WorkloadManagementSystem.PilotAgent.pilotCommands
5. DIRAC.WorkloadManagementSystem.PilotAgent.<CommandExtension>Commands
6. DIRAC.WorkloadManagementSystem.PilotAgent.pilotCommands
Note that commands in 3.-6. can only be used if the DIRAC installation
has been done. DIRAC extensions are taken from the -e ( --extraPackages ) option
of the pilot script.
"""
extensions = params.commandExtensions
modules = [ m + 'Commands' for m in extensions + ['pilot'] ]
commandObject = None
# Look for commands in the modules in the current directory first
for module in modules:
try:
impData = imp.find_module( module )
commandModule = imp.load_module( module, *impData )
commandObject = getattr( commandModule, commandName )
except Exception, _e:
pass
if commandObject:
return commandObject( params ), module
if params.diracInstalled:
diracExtensions = []
for ext in params.extensions:
if not ext.endswith( 'DIRAC' ):
diracExtensions.append( ext + 'DIRAC' )
else:
diracExtensions.append( ext )
diracExtensions += ['DIRAC']
ol = ObjectLoader( diracExtensions, log )
for module in modules:
commandObject, modulePath = ol.loadObject( 'WorkloadManagementSystem.PilotAgent',
module,
commandName )
if commandObject:
return commandObject( params ), modulePath
# No command could be instantiated
return None, None
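# Illustrative example (names are hypothetical): with
# params.commandExtensions = [ 'MyVO' ], getCommand first looks for the command
# class in MyVOCommands.py and pilotCommands.py in the current directory, and
# only then falls back to the installed DIRAC / extension modules listed in the
# docstring above.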
class Logger( object ):
""" Basic logger object, for use inside the pilot. Just using print.
"""
def __init__( self, name = 'Pilot', debugFlag = False, pilotOutput = 'pilot.out' ):
self.debugFlag = debugFlag
self.name = name
self.out = pilotOutput
def __outputMessage( self, msg, level, header ):
if self.out:
outputFile = open( self.out, 'a' )
for _line in msg.split( "\n" ):
if header:
outLine = "%s UTC %s [%s] %s" % ( time.strftime( '%Y-%m-%d %H:%M:%S', time.gmtime() ),
level,
self.name,
_line )
print outLine
if self.out:
outputFile.write( outLine + '\n' )
else:
print _line
if self.out:
outputFile.write( _line + '\n' )
if self.out:
outputFile.close()
sys.stdout.flush()
def setDebug( self ):
self.debugFlag = True
def debug( self, msg, header = True ):
if self.debugFlag:
self.__outputMessage( msg, "DEBUG", header )
def error( self, msg, header = True ):
self.__outputMessage( msg, "ERROR", header )
def warn( self, msg, header = True ):
self.__outputMessage( msg, "WARN", header )
def info( self, msg, header = True ):
self.__outputMessage( msg, "INFO", header )
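# Minimal usage sketch (illustrative): messages are echoed to stdout and
# appended to the pilot output file given at construction time.
# log = Logger( 'MyCommand', debugFlag = True, pilotOutput = 'pilot.out' )
# log.info( 'starting' )
# log.debug( 'printed only because debugFlag is True' )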
class CommandBase( object ):
""" CommandBase is the base class for every command in the pilot commands toolbox
"""
def __init__( self, pilotParams, dummy='' ):
""" c'tor
Defines the logger and the pilot parameters
"""
self.pp = pilotParams
self.log = Logger( self.__class__.__name__ )
self.debugFlag = False
for o, _ in self.pp.optList:
if o == '-d' or o == '--debug':
self.log.setDebug()
self.debugFlag = True
self.log.debug( "\n\n Initialized command %s" % self.__class__ )
def executeAndGetOutput( self, cmd, environDict = None ):
""" Execute a command on the worker node and get the output
"""
self.log.info( "Executing command %s" % cmd )
try:
import subprocess # spawn new processes, connect to their input/output/error pipes, and obtain their return codes.
_p = subprocess.Popen( "%s" % cmd, shell = True, env=environDict, stdout = subprocess.PIPE,
stderr = subprocess.PIPE, close_fds = False )
# standard output
outData = _p.stdout.read().strip()
# Echo the captured standard output line by line
for line in outData.split( '\n' ):
sys.stdout.write( line )
sys.stdout.write( '\n' )
for line in _p.stderr:
sys.stdout.write( line )
sys.stdout.write( '\n' )
# return code
returnCode = _p.wait()
self.log.debug( "Return code of %s: %d" % ( cmd, returnCode ) )
return (returnCode, outData)
except ImportError:
self.log.error( "Error importing subprocess" )
def exitWithError( self, errorCode ):
""" Wrapper around sys.exit()
"""
self.log.info( "List of child processes of current PID:" )
retCode, _outData = self.executeAndGetOutput( "ps --forest -o pid,%%cpu,%%mem,tty,stat,time,cmd -g %d" % os.getpid() )
if retCode:
self.log.error( "Failed to issue ps [ERROR %d] " % retCode )
sys.exit( errorCode )
class PilotParams( object ):
""" Class that holds the structure with all the parameters to be used across all the commands
"""
MAX_CYCLES = 10
def __init__( self ):
""" c'tor
param names and defaults are defined here
"""
self.rootPath = os.getcwd()
self.originalRootPath = os.getcwd()
self.pilotRootPath = os.getcwd()
self.workingDir = os.getcwd()
self.optList = {}
self.debugFlag = False
self.local = False
self.commandExtensions = []
self.commands = ['GetPilotVersion', 'CheckWorkerNode', 'InstallDIRAC',
'ConfigureBasics', 'ConfigureSite', 'ConfigureArchitecture', 'ConfigureCPURequirements',
'LaunchAgent']
self.extensions = []
self.site = ""
self.setup = ""
self.configServer = ""
self.installation = ""
self.ceName = ""
self.ceType = ''
self.queueName = ""
self.platform = ""
self.minDiskSpace = 2560 #MB
self.jobCPUReq = 900
self.pythonVersion = '27'
self.userGroup = ""
self.userDN = ""
self.maxCycles = self.MAX_CYCLES
self.flavour = 'DIRAC'
self.gridVersion = '2014-04-09'
self.pilotReference = ''
self.releaseVersion = ''
self.releaseProject = ''
self.gateway = ""
self.useServerCertificate = False
self.pilotScriptName = ''
# DIRAC client installation environment
self.diracInstalled = False
self.diracExtensions = []
# Some commands can define environment necessary to execute subsequent commands
self.installEnv = os.environ
# If DIRAC is preinstalled this file will receive the updates of the local configuration
self.localConfigFile = ''
self.executeCmd = False
self.configureScript = 'dirac-configure'
self.architectureScript = 'dirac-platform'
self.certsLocation = '%s/etc/grid-security' % self.workingDir
self.pilotCFGFile = 'pilot.json'
self.pilotCFGFileLocation = 'http://lhcbproject.web.cern.ch/lhcbproject/dist/DIRAC3/defaults/'
# Pilot command options
self.cmdOpts = ( ( 'b', 'build', 'Force local compilation' ),
( 'd', 'debug', 'Set debug flag' ),
( 'e:', 'extraPackages=', 'Extra packages to install (comma separated)' ),
( 'E:', 'commandExtensions=', 'Python module with extra commands' ),
( 'X:', 'commands=', 'Pilot commands to execute' ),
( 'g:', 'grid=', 'lcg tools package version' ),
( 'h', 'help', 'Show this help' ),
( 'i:', 'python=', 'Use python<26|27> interpreter' ),
( 'l:', 'project=', 'Project to install' ),
( 'p:', 'platform=', 'Use <platform> instead of local one' ),
( 'u:', 'url=', 'Use <url> to download tarballs' ),
( 'r:', 'release=', 'DIRAC release to install' ),
( 'n:', 'name=', 'Set <Site> as Site Name' ),
( 'D:', 'disk=', 'Require at least <space> MB available' ),
( 'M:', 'MaxCycles=', 'Maximum Number of JobAgent cycles to run' ),
( 'N:', 'Name=', 'CE Name' ),
( 'Q:', 'Queue=', 'Queue name' ),
( 'y:', 'CEType=', 'CE Type (normally InProcess)' ),
( 'S:', 'setup=', 'DIRAC Setup to use' ),
( 'C:', 'configurationServer=', 'Configuration servers to use' ),
( 'T:', 'CPUTime=', 'Requested CPU Time' ),
( 'G:', 'Group=', 'DIRAC Group to use' ),
( 'O:', 'OwnerDN=', 'Pilot OwnerDN (for private pilots)' ),
( 'U', 'Upload', 'Upload compiled distribution (if built)' ),
( 'V:', 'installation=', 'Installation configuration file' ),
( 'W:', 'gateway=', 'Configure <gateway> as DIRAC Gateway during installation' ),
( 's:', 'section=', 'Set base section for relative parsed options' ),
( 'o:', 'option=', 'Option=value to add' ),
( 'c', 'cert', 'Use server certificate instead of proxy' ),
( 'C:', 'certLocation=', 'Specify server certificate location' ),
( 'L:', 'pilotCFGLocation=', 'Specify pilot CFG location' ),
( 'F:', 'pilotCFGFile=', 'Specify pilot CFG file' ),
( 'R:', 'reference=', 'Use this pilot reference' ),
( 'x:', 'execute=', 'Execute instead of JobAgent' ),
)
self.__initOptions()
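# Example invocation of the pilot script that consumes these options (script
# name and option values are hypothetical):
# python dirac-pilot.py -d -r v6r15 -S Production -N ce.example.org -Q long -n LCG.Example.org -M 5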
def __initOptions( self ):
""" Parses and interpret options on the command line
"""
self.optList, __args__ = getopt.getopt( sys.argv[1:],
"".join( [ opt[0] for opt in self.cmdOpts ] ),
[ opt[1] for opt in self.cmdOpts ] )
for o, v in self.optList:
if o == '-E' or o == '--commandExtensions':
self.commandExtensions = v.split( ',' )
elif o == '-X' or o == '--commands':
self.commands = v.split( ',' )
elif o == '-e' or o == '--extraPackages':
self.extensions = v.split( ',' )
elif o == '-n' or o == '--name':
self.site = v
elif o == '-N' or o == '--Name':
self.ceName = v
elif o == '-y' or o == '--CEType':
self.ceType = v
elif o == '-Q' or o == '--Queue':
self.queueName = v
elif o == '-R' or o == '--reference':
self.pilotReference = v
elif o == '-d' or o == '--debug':
self.debugFlag = True
elif o in ( '-S', '--setup' ):
self.setup = v
elif o in ( '-C', '--configurationServer' ):
self.configServer = v
elif o in ( '-G', '--Group' ):
self.userGroup = v
elif o in ( '-x', '--execute' ):
self.executeCmd = v
elif o in ( '-O', '--OwnerDN' ):
self.userDN = v
elif o in ( '-V', '--installation' ):
self.installation = v
elif o == '-p' or o == '--platform':
self.platform = v
elif o == '-D' or o == '--disk':
try:
self.minDiskSpace = int( v )
except:
pass
elif o == '-r' or o == '--release':
self.releaseVersion = v.split(',',1)[0]
elif o in ( '-l', '--project' ):
self.releaseProject = v
elif o in ( '-W', '--gateway' ):
self.gateway = v
elif o == '-c' or o == '--cert':
self.useServerCertificate = True
elif o == '-C' or o == '--certLocation':
self.certsLocation = v
elif o == '-L' or o == '--pilotCFGLocation':
self.pilotCFGFileLocation = v
elif o == '-F' or o == '--pilotCFGFile':
self.pilotCFGFile = v
elif o == '-M' or o == '--MaxCycles':
try:
self.maxCycles = min( self.MAX_CYCLES, int( v ) )
except:
pass
elif o in ( '-T', '--CPUTime' ):
self.jobCPUReq = v
|
vmendez/DIRAC
|
WorkloadManagementSystem/PilotAgent/pilotTools.py
|
Python
|
gpl-3.0
| 17,650
|
[
"DIRAC"
] |
34a442dcb4c37a8ad60506d3c7f11118f3b8af8b533f85aa9fbea32eac9c3023
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of pyunicorn.
# Copyright (C) 2008--2015 Jonathan F. Donges and pyunicorn authors
# URL: <http://www.pik-potsdam.de/members/donges/software>
# License: BSD (3-clause)
"""
Provides classes for analyzing spatially embedded complex networks, handling
multivariate data and generating time series surrogates.
"""
# array object and fast numerics
import numpy as np
from numpy import random
# C++ inline code
from .. import weave_inline
# easy progress bar handling
from ..utils import progressbar
from .numerics import _embed_time_series_array, _recurrence_plot, _twins
#
# Define class Surrogates
#
class Surrogates(object):
"""
Encapsulates structures and methods related to surrogate time series.
Provides data structures and methods to generate surrogate data sets from a
set of time series and to evaluate the significance of various correlation
measures using these surrogates.
More information on time series surrogates can be found in [Schreiber2000]_
and [Kantz2006]_.
"""
#
# Define internal methods
#
def __init__(self, original_data, silence_level=1):
"""
Initialize an instance of Surrogates.
.. note::
The order of array dimensions is different from the standard of
``core``. Here it is [index, time] for reasons of computational
speed!
:type original_data: 2D array [index, time]
:arg original_data: The original time series for surrogate generation.
:arg int silence_level: The inverse level of verbosity of the object.
"""
if silence_level <= 1:
print "Generated an instance of the Surrogates class."
# Set class variables
self.original_data = original_data
"""The original time series for surrogate generation."""
self.silence_level = silence_level
"""(string) - The inverse level of verbosity of the object."""
# Set flags
self._normalized = False
self._fft_cached = False
self._twins_cached = False
# Cache
self._twins = None
self._original_data_fft = None
def __str__(self):
"""
Returns a string representation.
"""
return 'Surrogates: time series shape %s.' % (
self.original_data.shape,)
def clear_cache(self):
"""Clean up cache."""
try:
del self._original_data_fft
del self._twins
except AttributeError:
pass
#
# Methods for testing purposes
#
@staticmethod
def SmallTestData():
"""
Return a Surrogates instance representing a test data set of 6 time
series.
:rtype: Surrogates instance
:return: a Surrogates instance for testing purposes.
"""
# Create time series
ts = np.zeros((6, 200))
for i in xrange(6):
ts[i, :] = np.sin(np.arange(200)*np.pi/15. + i*np.pi/2.) + \
np.sin(np.arange(200) * np.pi / 30.)
return Surrogates(original_data=ts, silence_level=2)
#
# Define methods to normalize and analyze the data
#
@staticmethod
def normalize_time_series_array(time_series_array):
"""
:index:`Normalize <pair: normalize; time series array>` an array of
time series to zero mean and unit variance individually for each
individual time series.
**Modifies the given array in place!**
**Examples:**
>>> ts = Surrogates.SmallTestData().original_data
>>> Surrogates.SmallTestData().normalize_time_series_array(ts)
>>> r(ts.mean(axis=1))
array([ 0., 0., 0., 0., 0., 0.])
>>> r(ts.std(axis=1))
array([ 1., 1., 1., 1., 1., 1.])
:type time_series_array: 2D array [index, time]
:arg time_series_array: The time series array to be normalized.
"""
mean = time_series_array.mean(axis=1)
std = time_series_array.std(axis=1)
for i in xrange(time_series_array.shape[0]):
# Remove mean value from time series at each node (grid point)
time_series_array[i, :] -= mean[i]
# Normalize the standard deviation of anomalies to one
if std[i] != 0:
time_series_array[i, :] /= std[i]
def embed_time_series_array(self, time_series_array, dimension, delay):
"""
Return a :index:`delay embedding` of all time series.
.. note::
Only works for scalar time series!
**Example:**
>>> ts = Surrogates.SmallTestData().original_data
>>> Surrogates.SmallTestData().embed_time_series_array(
... time_series_array=ts, dimension=3, delay=2)[0,:6,:]
array([[ 0. , 0.61464833, 1.14988147],
[ 0.31244015, 0.89680225, 1.3660254 ],
[ 0.61464833, 1.14988147, 1.53884177],
[ 0.89680225, 1.3660254 , 1.6636525 ],
[ 1.14988147, 1.53884177, 1.73766672],
[ 1.3660254 , 1.6636525 , 1.76007351]])
:type time_series_array: 2D array [index, time]
:arg time_series_array: The time series array to be normalized.
:arg int dimension: The embedding dimension.
:arg int delay: The embedding delay.
:rtype: 3D array [index, time, dimension]
:return: the embedded time series.
"""
if self.silence_level <= 1:
print "Embedding all time series in dimension", dimension, \
"and with lag", delay, "..."
(N, n_time) = time_series_array.shape
embedding = np.empty((N, n_time - (dimension - 1)*delay, dimension))
_embed_time_series_array(N, n_time, dimension, delay,
time_series_array, embedding)
return embedding
# FIXME: I(wb) included the line
# dimension = embedding.shape[1]
# whose absence caused an error. I cannot guarantee that it is correct.
def recurrence_plot(self, embedding, threshold):
"""
Return the :index:`recurrence plot <pair: recurrence plot; time
series>` from an embedding of a time series.
Uses supremum norm.
:type embedding: 2D array [time, dimension]
:arg embedding: The embedded time series.
:arg float threshold: The recurrence threshold.
:rtype: 2D array [time, time]
:return: the recurrence matrix.
"""
if self.silence_level <= 1:
print "Calculating the recurrence plot..."
n_time = embedding.shape[0]
dimension = embedding.shape[1]
R = np.ones((n_time, n_time), dtype="int8")
_recurrence_plot(n_time, dimension, threshold, embedding, R)
return R
# FIXME: I(wb) included the line
# dimension = embedding_array.shape[2]
# whose absence caused an error. I cannot guarantee that it is correct.
def twins(self, embedding_array, threshold, min_dist=7):
"""
Return list of the :index:`twins <pair: twins; surrogates>` of each
state vector for all time series.
Two state vectors are said to be twins if they share the same
recurrences, i.e., if the corresponding rows or columns in the
recurrence plot are identical.
References: [Thiel2006]_, [Marwan2007]_.
:type embedding_array: 3D array [index, time, dimension]
:arg embedding_array: The embedded time series array.
:arg float threshold: The recurrence threshold.
:arg number min_dist: The minimum temporal distance for twins.
:rtype: [[number]]
:return: the list of twins for each state vector in the time series.
"""
if self.silence_level <= 1:
print "Finding twins..."
N = embedding_array.shape[0]
n_time = embedding_array.shape[1]
dimension = embedding_array.shape[2]
twins = []
# Initialize the R matrix with ones
R = np.empty((n_time, n_time))
# Initialize array to store the number of neighbors for each sample
nR = np.empty(n_time)
_twins(N, n_time, dimension, threshold, min_dist, embedding_array, R,
nR, twins)
return twins
#
# Define methods to generate sets of surrogate time series
#
def white_noise_surrogates(self, original_data):
"""
Return a shuffled copy of a time series array.
Each time series is shuffled individually. The surrogates correspond to
realizations of white noise consistent with the :attr:`original_data`
time series' amplitude distribution.
**Example** (Distributions of white noise surrogates should be the same as
for the original data):
>>> ts = Surrogates.SmallTestData().original_data
>>> surrogates = Surrogates.\
SmallTestData().white_noise_surrogates(ts)
>>> np.histogram(ts[0,:])[0]
array([21, 12, 9, 15, 33, 36, 18, 12, 16, 28])
>>> np.histogram(surrogates[0,:])[0]
array([21, 12, 9, 15, 33, 36, 18, 12, 16, 28])
:type original_data: 2D array [index, time]
:arg original_data: The original time series.
:rtype: 2D array [index, time]
:return: The surrogate time series.
"""
if self.silence_level <= 1:
print "Generating white noise surrogates by random shuffling..."
# Generate reference to shuffle function
shuffle = random.shuffle
surrogates = original_data.copy()
for i in xrange(surrogates.shape[0]):
shuffle(surrogates[i, :])
return surrogates
def correlated_noise_surrogates(self, original_data):
"""
Return Fourier surrogates.
Generate surrogates by Fourier transforming the :attr:`original_data`
time series (assumed to be real valued), randomizing the phases and
then applying an inverse Fourier transform. Correlated noise surrogates
share their power spectrum and autocorrelation function with the
original_data time series.
The Fast Fourier transforms of all time series are cached to facilitate
a faster generation of several surrogates for each time series. Hence,
:meth:`clear_cache` has to be called before generating surrogates from
a different set of time series!
.. note::
The amplitudes are not adjusted here, i.e., the
individual amplitude distributions are not conserved!
**Examples:**
The power spectrum is conserved up to small numerical deviations:
>>> ts = Surrogates.SmallTestData().original_data
>>> surrogates = Surrogates.\
SmallTestData().correlated_noise_surrogates(ts)
>>> all(r(np.abs(np.fft.fft(ts, axis=1))[0,1:10]) == \
r(np.abs(np.fft.fft(surrogates, axis=1))[0,1:10]))
True
However, the time series amplitude distributions differ:
>>> all(np.histogram(ts[0,:])[0] == np.histogram(surrogates[0,:])[0])
False
:type original_data: 2D array [index, time]
:arg original_data: The original time series.
:rtype: 2D array [index, time]
:return: The surrogate time series.
"""
if self.silence_level <= 1:
print "Generating correlated noise surrogates..."
# Calculate FFT of original_data time series
# The FFT of the original_data data has to be calculated only once,
# so it is stored in self._original_data_fft.
if self._fft_cached:
surrogates = self._original_data_fft
else:
surrogates = np.fft.rfft(original_data, axis=1)
self._original_data_fft = surrogates
self._fft_cached = True
# Get shapes
(N, n_time) = original_data.shape
len_phase = surrogates.shape[1]
# Generate random phases uniformly distributed in the
# interval [0, 2*Pi]
phases = random.uniform(low=0, high=2 * np.pi, size=(N, len_phase))
# Add random phases uniformly distributed in the interval [0, 2*Pi]
surrogates *= np.exp(1j * phases)
# Calculate IFFT and take the real part, the remaining imaginary part
# is due to numerical errors.
return np.ascontiguousarray(np.real(np.fft.irfft(surrogates, n=n_time,
axis=1)))
def AAFT_surrogates(self, original_data):
"""
Return surrogates using the amplitude adjusted Fourier transform
method.
Reference: [Schreiber2000]_
:type original_data: 2D array [index, time]
:arg original_data: The original time series.
:rtype: 2D array [index, time]
:return: The surrogate time series.
"""
# Create sorted Gaussian reference series
gaussian = random.randn(original_data.shape[0], original_data.shape[1])
gaussian.sort(axis=1)
# Rescale data to Gaussian distribution
ranks = original_data.argsort(axis=1).argsort(axis=1)
rescaled_data = np.zeros(original_data.shape)
for i in xrange(original_data.shape[0]):
rescaled_data[i, :] = gaussian[i, ranks[i, :]]
# Phase randomize rescaled data
phase_randomized_data = \
self.correlated_noise_surrogates(rescaled_data)
# Rescale back to amplitude distribution of original data
sorted_original = original_data.copy()
sorted_original.sort(axis=1)
ranks = phase_randomized_data.argsort(axis=1).argsort(axis=1)
for i in xrange(original_data.shape[0]):
rescaled_data[i, :] = sorted_original[i, ranks[i, :]]
return rescaled_data
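# Usage sketch (illustrative, not part of the original docstring):
# surr = Surrogates.SmallTestData()
# aaft = surr.AAFT_surrogates(surr.original_data)
# The surrogates keep the original amplitude distribution exactly (each row is
# a reshuffling of the original samples) while approximating its power spectrum.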
def refined_AAFT_surrogates(self, original_data, n_iterations,
output="true_amplitudes"):
"""
Return surrogates using the iteratively refined amplitude adjusted
Fourier transform method.
A set of AAFT surrogates (:meth:`AAFT_surrogates`) is iteratively
refined to produce a closer match of both amplitude distribution and
power spectrum of surrogate and original data.
Reference: [Schreiber2000]_
:type original_data: 2D array [index, time]
:arg original_data: The original time series.
:arg int n_iterations: Number of iterations / refinement steps
:arg str output: Type of surrogate to return. "true_amplitudes":
surrogates with correct amplitude distribution, "true_spectrum":
surrogates with correct power spectrum, "both": return both outputs
of the algorithm.
:rtype: 2D array [index, time]
:return: The surrogate time series.
"""
# Get size of dimensions
n_time = original_data.shape[1]
# Get Fourier transform of original data with caching
if self._fft_cached:
fourier_transform = self._original_data_fft
else:
fourier_transform = np.fft.rfft(original_data, axis=1)
self._original_data_fft = fourier_transform
self._fft_cached = True
# Get Fourier amplitudes
original_fourier_amps = np.abs(fourier_transform)
# Get sorted copy of original data
sorted_original = original_data.copy()
sorted_original.sort(axis=1)
# Get starting point / initial conditions for R surrogates
# (see [Schreiber2000]_)
R = self.AAFT_surrogates(original_data)
# Start iteration
for i in xrange(n_iterations):
# Get Fourier phases of R surrogate
r_fft = np.fft.rfft(R, axis=1)
r_phases = r_fft / np.abs(r_fft)
# Transform back, replacing the actual amplitudes by the desired
# ones, but keeping the phases exp(i ψ(i))
s = np.fft.irfft(original_fourier_amps * r_phases, n=n_time,
axis=1)
# Rescale to desired amplitude distribution
ranks = s.argsort(axis=1).argsort(axis=1)
for j in xrange(original_data.shape[0]):
R[j, :] = sorted_original[j, ranks[j, :]]
if output == "true_amplitudes":
return R
elif output == "true_spectrum":
return s
elif output == "both":
return (R, s)
else:
return (R, s)
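# Note on the outputs: R is rescaled to the exact amplitude distribution of the
# original data, while s retains the exact Fourier amplitudes (power spectrum);
# with increasing n_iterations the two converge towards each other
# (see [Schreiber2000]_).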
def twin_surrogates(self, original_data, dimension, delay, threshold,
min_dist=7):
"""
Return surrogates using the twin surrogate method.
Scalar twin surrogates are created by isolating the first component
(dimension) of the twin surrogate trajectories.
Twin surrogates share linear and nonlinear properties with the original
time series, since they correspond to realizations of trajectories of
the same dynamical systems with different initial conditions.
References: [Thiel2006]_ [*], [Marwan2007]_.
The twin lists of all time series are cached to facilitate a faster
generation of several surrogates for each time series. Hence,
:meth:`clear_cache` has to be called before generating twin surrogates
from a different set of time series!
:type original_data: 2D array [index, time]
:arg original_data: The original time series.
:arg int dimension: The embedding dimension.
:arg int delay: The embedding delay.
:arg float threshold: The recurrence threshold.
:arg number min_dist: The minimum temporal distance for twins.
:rtype: 2D array [index, time]
:return: the twin surrogates.
"""
# The algorithm proceeds in several steps:
# 1. Embed the original_data time series, using time delay embedding
# for simplicity. Use the same dimension and time delay delay for
# all time series for simplicity. Determine delay using time
# delayed mutual information and d using false nearest neighbors
# methods.
# 2. Use the algorithm proposed in [*] to find twins
# 3. Reconstruct one-dimensional twin surrogate time series
(N, n_time) = original_data.shape
# Make sure that twins are calculated only once
if self._twins_cached:
twins = self._twins
else:
embedding = self.embed_time_series_array(original_data,
dimension, delay)
twins = self.twins(embedding, threshold, min_dist)
self._twins = twins
self._twins_cached = True
surrogates = np.empty(original_data.shape)
code = r"""
int i, j, k, new_k, n_twins, rand;
// Initialize random number generator
srand48(time(0));
for (i = 0; i < N; i++) {
// Get the twin list for time series i
py::list twins_i = PyList_GetItem(twins, i);
// Randomly choose a starting point in the original_data
// trajectory.
k = floor(drand48() * n_time);
j = 0;
while (j < n_time) {
surrogates(i,j) = original_data(i,k);
// Get the list of twins of sample k in the original_data
// time series.
py::list twins_ik = PyList_GetItem(twins_i,k);
// Get the number of twins of k
n_twins = PyList_Size(twins_ik);
// If k has no twins, go to the next sample k+1. If k has
// twins at m, choose among m+1 and k+1 with equal probability
if (n_twins == 0)
k++;
else {
// Generate a random integer between 0 and n_twins
rand = floor(drand48() * (n_twins + 1));
// If rand = n_twins go to sample k+1, otherwise jump
// to the future of one of the twins.
if (rand == n_twins)
k++;
else {
k = twins_ik[rand];
k++;
}
}
// If the new k >= n_time, choose a new random starting point
// in the original_data time series.
if (k >= n_time) {
do {
new_k = floor(drand48() * n_time);
}
while (k == new_k);
k = new_k;
}
j++;
}
}
"""
weave_inline(locals(), code,
['N', 'n_time', 'original_data', 'twins', 'surrogates'])
return surrogates
#
# Defines methods to generate correlation measure matrices based on
# original_data and surrogate data for significance testing.
#
@staticmethod
def eval_fast_code(function, original_data, surrogates):
"""
Evaluate performance of fast and slow versions of algorithms.
Designed for evaluating fast and dirty C code against cleaner code
using Blitz arrays. Does some profiling and returns the total error
between the results.
:type function: Python function
:arg function: The function to be evaluated.
:type original_data: 2D array [index, time]
:arg original_data: The original time series.
:type surrogates: 2D array [index, time]
:arg surrogates: The surrogate time series.
:return float: The square root of the total squared difference between the resulting matrices.
"""
# Some profiling
# profile.run("fastResult = function(original_data, surrogates,
# fast=True)")
# profile.run("slowResult = function(original_data, surrogates,
# fast=False)")
fast_result = function(original_data, surrogates, fast=True)
slow_result = function(original_data, surrogates, fast=False)
# Return error
return np.sqrt(((fast_result - slow_result)**2).sum())
@staticmethod
def test_pearson_correlation(original_data, surrogates, fast=True):
"""
Return a test matrix of the Pearson correlation coefficient (zero lag).
The test matrix's entry :math:`(i,j)` contains the Pearson correlation
coefficient between original time series i and surrogate time series j
at lag zero. The resulting matrix is useful for significance tests
based on the Pearson correlation matrix of the original data.
.. note::
Assumes, that original_data and surrogates are already normalized.
:type original_data: 2D array [index, time]
:arg original_data: The original time series.
:type surrogates: 2D array [index, time]
:arg surrogates: The surrogate time series.
:rtype: 2D array [index, index]
:return: the Pearson correlation test matrix.
"""
(N, n_time) = original_data.shape
norm = 1. / float(n_time)
# Initialize Pearson correlation matrix
correlation = np.zeros((N, N), dtype="float32")
# correlation[i,j] gives the Pearson correlation coefficient between
# the ith original_data time series and the jth surrogate time series
code = r"""
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
if (i != j) {
for (int k = 0; k < n_time; k++) {
correlation(i,j) += original_data(i,k) *
surrogates(j,k);
}
correlation(i,j) *= norm;
}
}
}
"""
# Some faster weave inline code accessing Numpy arrays directly in C
# using pointer arithmetic.
# If the arrays are of C type, the last index varies the fastest!
# For this code to work correctly, arrays have to contiguous and of
# C type!!!
fastCode = """
float *p_correlation;
double *p_original, *p_surrogates;
for (int i = 0; i < N; i++) {
// Set pointer to correlation(i,0)
p_correlation = correlation + i*N;
for (int j = 0; j < N; j++) {
if (i != j) {
// Set pointer to original_data(i,0)
p_original = original_data + i*n_time;
// Set pointer to surrogates(j,0)
p_surrogates = surrogates + j*n_time;
for (int k = 0; k < n_time; k++) {
*p_correlation += (*p_original) * (*p_surrogates);
// Set pointer to original_data(i,k+1)
p_original++;
// Set pointer to surrogates(j,k+1)
p_surrogates++;
}
*p_correlation *= norm;
}
p_correlation++;
}
}
"""
args = ['original_data', 'surrogates', 'correlation', 'n_time', 'N',
'norm']
if fast:
weave_inline(locals(), fastCode, args, blitz=False)
else:
weave_inline(locals(), code, args)
return correlation
@staticmethod
def test_mutual_information(original_data, surrogates, n_bins=32,
fast=True):
"""
Return a test matrix of mutual information (zero lag).
The test matrix's entry :math:`(i,j)` contains the mutual information
between original time series i and surrogate time series j at zero lag.
The resulting matrix is useful for significance tests based on the
mutual information matrix of the original data.
.. note::
Assumes, that original_data and surrogates are already normalized.
:type original_data: 2D array [index, time]
:arg original_data: The original time series.
:type surrogates: 2D Numpy array [index, time]
:arg surrogates: The surrogate time series.
:arg int n_bins: Number of bins for estimating prob. distributions.
:arg bool fast: fast or slow algorithm to be used.
:rtype: 2D array [index, index]
:return: the mutual information test matrix.
"""
(N, n_time) = original_data.shape
# Get common range for all histograms
range_min = float(min(original_data.min(), surrogates.min()))
range_max = float(max(original_data.max(), surrogates.max()))
# Rescale all time series to the interval [0,1], using the maximum
# range of the whole dataset
scaling = 1. / (range_max - range_min)
# Create arrays to hold symbolic trajectories
symbolic_original = np.empty(original_data.shape, dtype="int32")
symbolic_surrogates = np.empty(original_data.shape, dtype="int32")
# Initialize array to hold 1d-histograms of individual time series
hist_original = np.zeros((N, n_bins), dtype="int32")
hist_surrogates = np.zeros((N, n_bins), dtype="int32")
# Initialize array to hold 2d-histogram for one pair of time series
hist2d = np.zeros((n_bins, n_bins), dtype="int32")
# Initialize mutual information array
mi = np.zeros((N, N), dtype="float32")
# Calculate symbolic time series and histograms
# Calculate 2D histograms and mutual information
# mi[i,j] gives the mutual information between the ith original_data
# time series and the jth surrogate time series.
code = r"""
int i, j, k, l, m;
int symbol, symbol_i, symbol_j;
double rescaled, norm, hpl, hpm, plm;
// Calculate histogram norm
norm = 1.0 / n_time;
for (i = 0; i < N; i++) {
for (k = 0; k < n_time; k++) {
// Original time series
// Calculate symbolic trajectories for each time series,
// where the symbols are bins
rescaled = scaling * (original_data(i,k) - range_min);
if (rescaled < 1.0)
symbolic_original(i,k) = rescaled * n_bins;
else
symbolic_original(i,k) = n_bins - 1;
// Calculate 1d-histograms for single time series
symbol = symbolic_original(i,k);
hist_original(i,symbol) += 1;
// Surrogate time series
// Calculate symbolic trajectories for each time series,
// where the symbols are bins
rescaled = scaling * (surrogates(i,k) - range_min);
if (rescaled < 1.0)
symbolic_surrogates(i,k) = rescaled * n_bins;
else
symbolic_surrogates(i,k) = n_bins - 1;
// Calculate 1d-histograms for single time series
symbol = symbolic_surrogates(i,k);
hist_surrogates(i,symbol) += 1;
}
}
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
// The case i = j is not of interest here!
if (i != j) {
// Calculate 2d-histogram for one pair of time series
// (i,j).
for (k = 0; k < n_time; k++) {
symbol_i = symbolic_original(i,k);
symbol_j = symbolic_surrogates(j,k);
hist2d(symbol_i,symbol_j) += 1;
}
// Calculate mutual information for one pair of time
// series (i,j).
for (l = 0; l < n_bins; l++) {
hpl = hist_original(i,l) * norm;
if (hpl > 0.0) {
for (m = 0; m < n_bins; m++) {
hpm = hist_surrogates(j,m) * norm;
if (hpm > 0.0) {
plm = hist2d(l,m) * norm;
if (plm > 0.0) {
mi(i,j) += plm * log(plm/hpm/hpl);
}
}
}
}
}
// Reset hist2d to zero in all bins
for (l = 0; l < n_bins; l++) {
for (m = 0; m < n_bins; m++)
hist2d(l,m) = 0;
}
}
}
}
"""
# original_data and surrogates must be contiguous Numpy arrays for
# this code to work correctly!
# All other arrays are generated from scratch in this method and
# are guaranteed to be contiguous by NumPy.
fastCode = r"""
long i, j, k, l, m, in_bins, jn_bins, in_time, jn_time;
double norm, rescaled, hpl, hpm, plm;
double *p_original, *p_surrogates;
float *p_mi;
int *p_symbolic_original, *p_symbolic_surrogates, *p_hist_original,
*p_hist_surrogates, *p_hist2d;
// Calculate histogram norm
norm = 1.0 / n_time;
// Initialize in_bins, in_time
in_time = in_bins = 0;
for (i = 0; i < N; i++) {
// Set pointer to original_data(i,0)
p_original = original_data + in_time;
// Set pointer to surrogates(i,0)
p_surrogates = surrogates + in_time;
// Set pointer to symbolic_original(i,0)
p_symbolic_original = symbolic_original + in_time;
// Set pointer to symbolic_surrogates(i,0)
p_symbolic_surrogates = symbolic_surrogates + in_time;
for (k = 0; k < n_time; k++) {
// Rescale sample into interval [0,1]
rescaled = scaling * (*p_original - range_min);
// Calculate symbolic trajectories for each time series,
// where the symbols are bin numbers.
if (rescaled < 1.0)
*p_symbolic_original = rescaled * n_bins;
else
*p_symbolic_original = n_bins - 1;
// Calculate 1d-histograms for single time series
// Set pointer to hist_original(i, *p_symbolic_original)
p_hist_original = hist_original + in_bins
+ *p_symbolic_original;
(*p_hist_original)++;
// Rescale sample into interval [0,1]
rescaled = scaling * (*p_surrogates - range_min);
// Calculate symbolic trajectories for each time series,
// where the symbols are bin numbers.
if (rescaled < 1.0)
*p_symbolic_surrogates = rescaled * n_bins;
else
*p_symbolic_surrogates = n_bins - 1;
// Calculate 1d-histograms for single time series
// Set pointer to hist_surrogates(i, *p_symbolic_surrogates)
p_hist_surrogates = hist_surrogates + in_bins
+ *p_symbolic_surrogates;
(*p_hist_surrogates)++;
// Set pointer to original_data(i,k+1)
p_original++;
// Set pointer to surrogates(i,k+1)
p_surrogates++;
// Set pointer to symbolic_original(i,k+1)
p_symbolic_original++;
// Set pointer to symbolic_surrogates(i,k+1)
p_symbolic_surrogates++;
}
in_bins += n_bins;
in_time += n_time;
}
// Initialize in_time, in_bins
in_time = in_bins = 0;
for (i = 0; i < N; i++) {
// Set pointer to mi(i,0)
p_mi = mi + i*N;
// Initialize jn_time, jn_bins
jn_time = jn_bins = 0;
for (j = 0; j < N; j++) {
// Don't do anything if i = j, this case is not of
// interest here!
if (i != j) {
// Set pointer to symbolic_original(i,0)
p_symbolic_original = symbolic_original + in_time;
// Set pointer to symbolic_surrogates(j,0)
p_symbolic_surrogates = symbolic_surrogates + jn_time;
// Calculate 2d-histogram for one pair of time series
// (i,j).
for (k = 0; k < n_time; k++) {
// Set pointer to hist2d(*p_symbolic_original,
// *p_symbolic_surrogates)
p_hist2d = hist2d + (*p_symbolic_original)*n_bins
+ *p_symbolic_surrogates;
(*p_hist2d)++;
// Set pointer to symbolic_original(i,k+1)
p_symbolic_original++;
// Set pointer to symbolic_surrogates(j,k+1)
p_symbolic_surrogates++;
}
// Calculate mutual information for one pair of time
// series (i,j)
// Set pointer to hist_original(i,0)
p_hist_original = hist_original + in_bins;
for (l = 0; l < n_bins; l++) {
// Set pointer to hist_surrogates(j,0)
p_hist_surrogates = hist_surrogates + jn_bins;
// Set pointer to hist2d(l,0)
p_hist2d = hist2d + l*n_bins;
hpl = (*p_hist_original) * norm;
if (hpl > 0.0) {
for (m = 0; m < n_bins; m++) {
hpm = (*p_hist_surrogates) * norm;
if (hpm > 0.0) {
plm = (*p_hist2d) * norm;
if (plm > 0.0)
*p_mi += plm * log(plm/hpm/hpl);
}
// Set pointer to hist_surrogates(j,m+1)
p_hist_surrogates++;
// Set pointer to hist2d(l,m+1)
p_hist2d++;
}
}
// Set pointer to hist_original(i,l+1)
p_hist_original++;
}
// Reset hist2d to zero in all bins
for (l = 0; l < n_bins; l++) {
// Set pointer to hist2d(l,0)
p_hist2d = hist2d + l*n_bins;
for (m = 0; m < n_bins; m++) {
*p_hist2d = 0;
// Set pointer to hist2d(l,m+1)
p_hist2d++;
}
}
}
// Set pointer to mi(i,j+1)
p_mi++;
jn_time += n_time;
jn_bins += n_bins;
}
in_time += n_time;
in_bins += n_bins;
}
"""
args = ['n_time', 'N', 'n_bins', 'scaling', 'range_min',
'original_data', 'surrogates', 'symbolic_original',
'symbolic_surrogates', 'hist_original', 'hist_surrogates',
'hist2d', 'mi']
if fast:
weave_inline(locals(), fastCode, args, blitz=False)
else:
weave_inline(locals(), code, args)
return mi
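# A minimal usage sketch (hedged: every name below, including this method's
# own name, is an assumption for illustration and is not shown in this file):
#
#     data = np.random.randn(5, 1000)            # shape [index, time]
#     s = Surrogates(data)                       # assumed constructor
#     surr = s.white_noise_surrogates(data)      # assumed surrogate generator
#     mi = s.test_mutual_information(data, surr, n_bins=32)
#
# mi[i, j] then estimates the zero-lag mutual information, in nats (the C
# kernels above use the natural logarithm), between original series i and
# surrogate series j.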
#
# Define methods to perform significance tests on correlation measures
# based on surrogates.
#
def original_distribution(self, test_function, original_data, n_bins=100):
"""
Return a normalized histogram of a similarity measure matrix.
The absolute value of the similarity measure is used, since only the
degree of similarity was of interest originally.
:type test_function: Python function
:arg test_function: The function implementing the similarity measure.
:type original_data: 2D array [index, time]
:arg original_data: The original time series.
:arg int n_bins: The number of bins for estimating prob. distributions.
:rtype: tuple of 1D arrays ([bins],[bins])
:return: the similarity measure histogram and lower bin boundaries.
"""
if self.silence_level <= 1:
print "Estimating probability density distribution of \
original_data data..."
# Normalize original_data time series to zero mean and unit variance
if not self._normalized:
self.normalize_time_series_array(original_data)
self._normalized = True
correlation_measure = np.abs(test_function(original_data,
original_data))
(hist, lbb) = np.histogram(correlation_measure, n_bins, normed=True)
# Normalize
hist /= hist.sum()
lbb = lbb[:-1]
return (hist, lbb)
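# Sketch of how the returned histogram could be used (hedged: the
# similarity-measure method name below is an assumption for illustration):
#
#     hist, lbb = s.original_distribution(s.test_pearson_correlation, data,
#                                         n_bins=100)
#     # hist sums to one and lbb holds the lower boundary of each bin, so the
#     # empirical distribution function is np.cumsum(hist) over lbb.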
def test_threshold_significance(self, surrogate_function, test_function,
realizations=1, n_bins=100,
interval=(-1, 1)):
"""
Return a test distribution for a similarity measure.
Perform a significance test on the values of a correlation measure
based on original_data time series and surrogate data. Returns a
density estimate (histogram) of the absolute value of the correlation
measure over all realizations.
The resulting distribution of the values of similarity measure from
original and surrogate time series is of use for testing the
statistical significance of a selected threshold value for climate
network generation.
:type surrogate_function: Python function
:arg surrogate_function: The function implementing the surrogates.
:type test_function: Python function
:arg test_function: The function implementing the similarity measure.
:arg int realizations: The number of surrogates to be created for each
time series.
:arg int n_bins: The number of bins for estimating probability
distribution of test similarity measure.
:type interval: (float, float)
:arg interval: The range over which to estimate similarity measure
distribution.
:rtype: tuple of 1D arrays ([bins],[bins])
:return: similarity measure test histogram and lower bin boundaries.
"""
if self.silence_level <= 1:
print "Starting significance test based on", realizations, \
"realizations of surrogates..."
original_data = self.original_data
self._fft_cached = False
self._twins_cached = False
# Create reference to np.histogram function
numpy_hist = np.histogram
# Normalize original_data time series to zero mean and unit variance
if not self._normalized:
self.normalize_time_series_array(original_data)
self._normalized = True
# Initialize density estimate
density_estimate = np.zeros(n_bins)
# Initialize progress bar
if self.silence_level <= 2:
progress = progressbar.ProgressBar(maxval=realizations).start()
for i in xrange(realizations):
# Update progress bar
if self.silence_level <= 2:
progress.update(i)
# Get the surrogate
# Mean and variance are conserved by all surrogates
surrogates = surrogate_function(original_data)
# Get the correlation measure test matrix
correlation_measure_test = np.abs(test_function(original_data,
surrogates))
# Test if correlation measure values are outside range
if correlation_measure_test.min() < interval[0]:
print "Warning! Correlation measure value left of range."
if correlation_measure_test.max() > interval[1]:
print "Warning! Correlation measure value right of range."
# Estimate density of current realization
(hist, lbb) = numpy_hist(correlation_measure_test, n_bins,
interval, normed=True)
# Add to density estimate over all realizations
density_estimate += hist
# Clean up (should be done automatically by Python,
# but you never know...)
del surrogates, correlation_measure_test
if self.silence_level <= 2:
progress.finish()
# Normalize density estimate
density_estimate /= density_estimate.sum()
lbb = lbb[:-1]
return (density_estimate, lbb)
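# Sketch of a threshold significance test built on this method (hedged: the
# surrogate and similarity-measure method names below are assumptions):
#
#     dens, lbb = s.test_threshold_significance(
#         s.correlated_noise_surrogates, s.test_pearson_correlation,
#         realizations=100, n_bins=100, interval=(-1, 1))
#     # Approximate one-sided p-value of a candidate network threshold t,
#     # using the lower bin boundaries returned above:
#     p_value = dens[lbb >= t].sum()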
|
wbarfuss/pyunicorn
|
pyunicorn/timeseries/surrogates.py
|
Python
|
bsd-3-clause
| 43,471
|
[
"Gaussian"
] |
e69799395d9efee23bcd459d291fbbe1077be6f64c022c57f20d1c38a1d02141
|
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
from dpp import sample_dpp
def select_diverse(X, k):
m = X.shape[0]
sigma = 0.1*np.mean(pairwise_distances(X)+np.identity(m))
sample = [np.random.choice(m)] # randomly select the first sample
for i in range(1, k):
X_selected = X[sample].reshape((i,-1))
kde = KernelDensity(kernel='gaussian', bandwidth=sigma).fit(X_selected)
kde_scores = np.exp(kde.score_samples(X))
sample.append(np.argmin(kde_scores))
return sample
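# Minimal usage sketch (illustrative data, not part of the original script):
#
#     X = np.random.rand(500, 2)
#     idx = select_diverse(X, 10)
#     diverse_points = X[idx]
#
# Each iteration fits a Gaussian KDE on the points selected so far and adds
# the candidate with the lowest density under that KDE, i.e. the point that
# is least similar to everything already chosen.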
def test(k, sampling='grid'):
from itertools import product
if sampling == 'gaussian':
X = np.random.normal(scale=.1, size=(1000, 2))
else:
# Generate grid
x = np.arange(0, 1.1, 0.1)
y = np.arange(0, 1.1, 0.1)
X = np.array(list(product(x, y)))
sample = select_diverse(X, k)
M = np.exp(-pairwise_distances(X)**2/(10./k)**2)
dpp = sample_dpp(M, k)
rand = np.random.choice(X.shape[0], k)
# Plot results
mn = np.min(X, axis=0)-0.1
mx = np.max(X, axis=0)+0.1
plt.figure()
plt.subplot(131)
plt.plot(X[sample,0],X[sample,1],'o',)
plt.plot(X[:,0],X[:,1],'g.', alpha=0.5)
plt.title('Sample from the KDE')
plt.xlim(mn[0], mx[0])
plt.ylim(mn[1], mx[1])
plt.subplot(132)
plt.plot(X[dpp,0],X[dpp,1],'o',)
plt.plot(X[:,0],X[:,1],'g.', alpha=0.5)
plt.title('Sample from the k-DPP')
plt.xlim(mn[0], mx[0])
plt.ylim(mn[1], mx[1])
plt.subplot(133)
plt.plot(X[rand,0],X[rand,1],'o',)
plt.plot(X[:,0],X[:,1],'g.', alpha=0.5)
plt.title('Random sampling')
plt.xlim(mn[0], mx[0])
plt.ylim(mn[1], mx[1])
if __name__ == "__main__":
k = 10
test(k, sampling='gaussian')
# from synthesis import synthesize_shape, save_plot
#
# a = 0.1
# A = (1+2*a)*np.random.rand(1000,3)-a # Specify shape attributes here
# model_name = 'PCA'
# c = 0
#
# X, indices = synthesize_shape(A, c=0, model_name='PCA')
# A = A[indices] # set of valid attributes
# X = X[indices] # set of valid shapes
#
# M = np.exp(-pairwise_distances(A)**2/sigma**2) # gaussian kernel
# Y = sample_dpp(M, k)
#
# save_plot(A[Y], X[Y], c=c, model_name=model_name)
|
IDEALLab/domain_expansion_jmd_2017
|
diverse_selection.py
|
Python
|
mit
| 2,406
|
[
"Gaussian"
] |
82cace507860b9bcbee9265d877df72cc0708154f3e4996de8a1a9b21ffa74d7
|
from math import pi
import numpy as np
from ase.atoms import Atoms
from ase.calculators.calculator import Calculator
def make_test_dft_calculation():
a = b = 2.0
c = 6.0
atoms = Atoms(positions=[(0, 0, c / 2)],
symbols='H',
pbc=(1, 1, 0),
cell=(a, b, c),
calculator=TestCalculator())
return atoms
class TestCalculator:
def __init__(self, nk=8):
assert nk % 2 == 0
bzk = []
weights = []
ibzk = []
w = 1.0 / nk**2
for i in range(-nk + 1, nk, 2):
for j in range(-nk + 1, nk, 2):
k = (0.5 * i / nk, 0.5 * j / nk, 0)
bzk.append(k)
if i >= j > 0:
ibzk.append(k)
if i == j:
weights.append(4 * w)
else:
weights.append(8 * w)
assert abs(sum(weights) - 1.0) < 1e-12
self.bzk = np.array(bzk)
self.ibzk = np.array(ibzk)
self.weights = np.array(weights)
# Calculate eigenvalues and wave functions:
self.init()
def init(self):
nibzk = len(self.weights)
nbands = 1
V = -1.0
self.eps = 2 * V * (np.cos(2 * pi * self.ibzk[:, 0]) +
np.cos(2 * pi * self.ibzk[:, 1]))
self.eps.shape = (nibzk, nbands)
self.psi = np.zeros((nibzk, 20, 20, 60), complex)
phi = np.empty((2, 2, 20, 20, 60))
z = np.linspace(-1.5, 1.5, 60, endpoint=False)
for i in range(2):
x = np.linspace(0, 1, 20, endpoint=False) - i
for j in range(2):
y = np.linspace(0, 1, 20, endpoint=False) - j
r = (((x[:, None]**2 +
y**2)[:, :, None] +
z**2)**0.5).clip(0, 1)
phi = 1.0 - r**2 * (3.0 - 2.0 * r)
phase = np.exp(pi * 2j * np.dot(self.ibzk, (i, j, 0)))
self.psi += phase[:, None, None, None] * phi
def get_pseudo_wave_function(self, band=0, kpt=0, spin=0):
assert spin == 0 and band == 0
return self.psi[kpt]
def get_eigenvalues(self, kpt=0, spin=0):
assert spin == 0
return self.eps[kpt]
def get_number_of_bands(self):
return 1
def get_k_point_weights(self):
return self.weights
def get_number_of_spins(self):
return 1
def get_fermi_level(self):
return 0.0
def get_pseudo_density(self):
n = 0.0
for w, eps, psi in zip(self.weights, self.eps[:, 0], self.psi):
if eps >= 0.0:
continue
n += w * (psi * psi.conj()).real
n[1:] += n[:0:-1].copy()
n[:, 1:] += n[:, :0:-1].copy()
n += n.transpose((1, 0, 2)).copy()
n /= 8
return n
class TestPotential(Calculator):
implemented_properties = ['energy', 'forces']
def calculate(self, atoms, properties, system_changes):
Calculator.calculate(self, atoms, properties, system_changes)
E = 0.0
R = atoms.positions
F = np.zeros_like(R)
for a, r in enumerate(R):
D = R - r
d = (D**2).sum(1)**0.5
x = d - 1.0
E += np.vdot(x, x)
d[a] = 1
F -= (x / d)[:, None] * D
energy = 0.25 * E
self.results = {'energy': energy, 'forces': F}
def numeric_force(atoms, a, i, d=0.001):
"""Evaluate force along i'th axis on a'th atom using finite difference.
This will trigger two calls to get_potential_energy(), with atom a moved
plus/minus d in the i'th axial direction, respectively.
"""
p0 = atoms.positions[a, i]
atoms.positions[a, i] += d
eplus = atoms.get_potential_energy()
atoms.positions[a, i] -= 2 * d
eminus = atoms.get_potential_energy()
atoms.positions[a, i] = p0
return (eminus - eplus) / (2 * d)
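# Minimal usage sketch (illustrative only, not part of the original module):
# compare the analytic force from TestPotential with the central finite
# difference computed by numeric_force for one atom along one axis.
#
#     atoms = Atoms('H2', positions=[(0, 0, 0), (0, 0, 1.1)])
#     atoms.set_calculator(TestPotential())
#     f_analytic = atoms.get_forces()[0, 2]
#     f_numeric = numeric_force(atoms, a=0, i=2, d=1e-3)
#     print(abs(f_analytic - f_numeric))  # should be small, of order d**2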
|
suttond/MODOI
|
ase/calculators/test.py
|
Python
|
lgpl-3.0
| 3,985
|
[
"ASE"
] |
b2e9b0c024dd4ddce2f871591d1d9af149a539c9236989f591c1ce16521a1eae
|
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Collection of modules for dealing with biological data in Python.
The Biopython Project is an international association of developers
of freely available Python tools for computational molecular biology.
http://biopython.org
"""
class MissingExternalDependencyError(Exception):
pass
def _load_registries():
import sys, os
from Bio.config.Registry import Registry
if getattr(sys, "version_info", (1, 5))[:2] < (2, 1):
return
self = sys.modules[__name__] # self refers to this module.
# Load the registries. Look in all the '.py' files in Bio.config
# for Registry objects. Save them all into the local namespace.
# Import code changed to allow for compilation with py2exe from distutils
# import Bio.config
config_imports = __import__("Bio.config", {}, {}, ["Bio"])
# in a zipfile
if hasattr(config_imports, '__loader__'):
zipfiles = __import__("Bio.config", {}, {}, ["Bio"]).__loader__._files
# Get only Bio.config modules
x = [zipfiles[file][0] for file in zipfiles.keys() \
if 'Bio\\config' in file]
x = [name.split("\\")[-1] for name in x] # Get module name
x = map(lambda x: x[:-4], x) # chop off '.pyc'
# not in a zipfile, get files normally
else:
x = os.listdir(os.path.dirname(config_imports.__file__))
x = filter(lambda x: not x.startswith("_") and x.endswith(".py"), x)
x = map(lambda x: x[:-3], x) # chop off '.py'
for module in x:
module = __import__("Bio.config.%s" % module, {}, {}, ["Bio","config"])
for name, obj in module.__dict__.items():
if name.startswith("_") or not isinstance(obj, Registry):
continue
setattr(self, name, obj)
# Put the registry loading code in a function so we don't pollute the
# module namespace with local variables.
# WARNING:
# The call to _load_registries is being skipped as part of deprecating
# Bio.expressions, which does not function properly with the new version
# of mxTextTools. If at some point we decide to revive Bio.expressions,
# this line should be reinstated.
# _load_registries()
del _load_registries
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/__init__.py
|
Python
|
apache-2.0
| 2,429
|
[
"Biopython"
] |
54c0052e040c42e51276314076b3885ff22faac4fc27fe6b1747ff26a822d34c
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import session, msgprint
from webnotes.utils import today
from utilities.transaction_base import TransactionBase
class DocType(TransactionBase):
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
def validate(self):
if session['user'] != 'Guest' and not self.doc.customer:
msgprint("Please select Customer from whom issue is raised",
raise_exception=True)
if self.doc.status=="Closed" and \
webnotes.conn.get_value("Customer Issue", self.doc.name, "status")!="Closed":
self.doc.resolution_date = today()
self.doc.resolved_by = webnotes.session.user
def on_cancel(self):
lst = webnotes.conn.sql("select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t2.prevdoc_docname = '%s' and t1.docstatus!=2"%(self.doc.name))
if lst:
lst1 = ','.join([x[0] for x in lst])
msgprint("Maintenance Visit No. "+lst1+" already created against this customer issue. So can not be Cancelled")
raise Exception
else:
webnotes.conn.set(self.doc, 'status', 'Cancelled')
def on_update(self):
pass
@webnotes.whitelist()
def make_maintenance_visit(source_name, target_doclist=None):
from webnotes.model.mapper import get_mapped_doclist
visit = webnotes.conn.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
doclist = get_mapped_doclist("Customer Issue", source_name, {
"Customer Issue": {
"doctype": "Maintenance Visit",
"field_map": {
"complaint": "description",
"doctype": "prevdoc_doctype",
"name": "prevdoc_docname"
}
}
}, target_doclist)
return [d.fields for d in doclist]
|
saurabh6790/test-med-app
|
support/doctype/customer_issue/customer_issue.py
|
Python
|
agpl-3.0
| 2,037
|
[
"VisIt"
] |
4bd4447c06af664513aa4935162f597e253a025c70177cd173ebbb16aeeec34e
|